utils.py
- #!/usr/bin/env python
- # coding: utf-8
- from __future__ import unicode_literals
- import base64
- import binascii
- import calendar
- import codecs
- import collections
- import contextlib
- import ctypes
- import datetime
- import email.utils
- import email.header
- import errno
- import functools
- import inspect
- import io
- import itertools
- import json
- import locale
- import math
- import operator
- import os
- import platform
- import random
- import re
- import socket
- import ssl
- import subprocess
- import sys
- import tempfile
- import time
- import traceback
- import unicodedata
- import xml.etree.ElementTree
- import zlib
- from .compat import (
- compat_HTMLParseError,
- compat_HTMLParser,
- compat_basestring,
- compat_brotli as brotli,
- compat_casefold,
- compat_chr,
- compat_collections_abc,
- compat_cookiejar,
- compat_ctypes_WINFUNCTYPE,
- compat_datetime_timedelta_total_seconds,
- compat_etree_fromstring,
- compat_expanduser,
- compat_html_entities,
- compat_html_entities_html5,
- compat_http_client,
- compat_integer_types,
- compat_kwargs,
- compat_ncompress as ncompress,
- compat_os_name,
- compat_re_Match,
- compat_re_Pattern,
- compat_shlex_quote,
- compat_str,
- compat_struct_pack,
- compat_struct_unpack,
- compat_urllib_error,
- compat_urllib_HTTPError,
- compat_urllib_parse,
- compat_urllib_parse_parse_qs as compat_parse_qs,
- compat_urllib_parse_urlencode,
- compat_urllib_parse_urlparse,
- compat_urllib_parse_unquote_plus,
- compat_urllib_request,
- compat_xpath,
- )
- from .socks import (
- ProxyType,
- sockssocket,
- )
- def register_socks_protocols():
- # "Register" SOCKS protocols
- # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
- # URLs with protocols not in urlparse.uses_netloc are not handled correctly
- for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
- if scheme not in compat_urllib_parse.uses_netloc:
- compat_urllib_parse.uses_netloc.append(scheme)
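- # Sketch of the effect (proxy address hypothetical): with the schemes
- # registered, URL handling treats the host part as a netloc, e.g.
- # >>> register_socks_protocols()
- # >>> compat_urllib_parse.urlsplit('socks5://127.0.0.1:1080').netloc
- # '127.0.0.1:1080'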
- # Unfavoured alias
- compiled_regex_type = compat_re_Pattern
- def random_user_agent():
- _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
- _CHROME_VERSIONS = (
- '74.0.3729.129',
- '76.0.3780.3',
- '76.0.3780.2',
- '74.0.3729.128',
- '76.0.3780.1',
- '76.0.3780.0',
- '75.0.3770.15',
- '74.0.3729.127',
- '74.0.3729.126',
- '76.0.3779.1',
- '76.0.3779.0',
- '75.0.3770.14',
- '74.0.3729.125',
- '76.0.3778.1',
- '76.0.3778.0',
- '75.0.3770.13',
- '74.0.3729.124',
- '74.0.3729.123',
- '73.0.3683.121',
- '76.0.3777.1',
- '76.0.3777.0',
- '75.0.3770.12',
- '74.0.3729.122',
- '76.0.3776.4',
- '75.0.3770.11',
- '74.0.3729.121',
- '76.0.3776.3',
- '76.0.3776.2',
- '73.0.3683.120',
- '74.0.3729.120',
- '74.0.3729.119',
- '74.0.3729.118',
- '76.0.3776.1',
- '76.0.3776.0',
- '76.0.3775.5',
- '75.0.3770.10',
- '74.0.3729.117',
- '76.0.3775.4',
- '76.0.3775.3',
- '74.0.3729.116',
- '75.0.3770.9',
- '76.0.3775.2',
- '76.0.3775.1',
- '76.0.3775.0',
- '75.0.3770.8',
- '74.0.3729.115',
- '74.0.3729.114',
- '76.0.3774.1',
- '76.0.3774.0',
- '75.0.3770.7',
- '74.0.3729.113',
- '74.0.3729.112',
- '74.0.3729.111',
- '76.0.3773.1',
- '76.0.3773.0',
- '75.0.3770.6',
- '74.0.3729.110',
- '74.0.3729.109',
- '76.0.3772.1',
- '76.0.3772.0',
- '75.0.3770.5',
- '74.0.3729.108',
- '74.0.3729.107',
- '76.0.3771.1',
- '76.0.3771.0',
- '75.0.3770.4',
- '74.0.3729.106',
- '74.0.3729.105',
- '75.0.3770.3',
- '74.0.3729.104',
- '74.0.3729.103',
- '74.0.3729.102',
- '75.0.3770.2',
- '74.0.3729.101',
- '75.0.3770.1',
- '75.0.3770.0',
- '74.0.3729.100',
- '75.0.3769.5',
- '75.0.3769.4',
- '74.0.3729.99',
- '75.0.3769.3',
- '75.0.3769.2',
- '75.0.3768.6',
- '74.0.3729.98',
- '75.0.3769.1',
- '75.0.3769.0',
- '74.0.3729.97',
- '73.0.3683.119',
- '73.0.3683.118',
- '74.0.3729.96',
- '75.0.3768.5',
- '75.0.3768.4',
- '75.0.3768.3',
- '75.0.3768.2',
- '74.0.3729.95',
- '74.0.3729.94',
- '75.0.3768.1',
- '75.0.3768.0',
- '74.0.3729.93',
- '74.0.3729.92',
- '73.0.3683.117',
- '74.0.3729.91',
- '75.0.3766.3',
- '74.0.3729.90',
- '75.0.3767.2',
- '75.0.3767.1',
- '75.0.3767.0',
- '74.0.3729.89',
- '73.0.3683.116',
- '75.0.3766.2',
- '74.0.3729.88',
- '75.0.3766.1',
- '75.0.3766.0',
- '74.0.3729.87',
- '73.0.3683.115',
- '74.0.3729.86',
- '75.0.3765.1',
- '75.0.3765.0',
- '74.0.3729.85',
- '73.0.3683.114',
- '74.0.3729.84',
- '75.0.3764.1',
- '75.0.3764.0',
- '74.0.3729.83',
- '73.0.3683.113',
- '75.0.3763.2',
- '75.0.3761.4',
- '74.0.3729.82',
- '75.0.3763.1',
- '75.0.3763.0',
- '74.0.3729.81',
- '73.0.3683.112',
- '75.0.3762.1',
- '75.0.3762.0',
- '74.0.3729.80',
- '75.0.3761.3',
- '74.0.3729.79',
- '73.0.3683.111',
- '75.0.3761.2',
- '74.0.3729.78',
- '74.0.3729.77',
- '75.0.3761.1',
- '75.0.3761.0',
- '73.0.3683.110',
- '74.0.3729.76',
- '74.0.3729.75',
- '75.0.3760.0',
- '74.0.3729.74',
- '75.0.3759.8',
- '75.0.3759.7',
- '75.0.3759.6',
- '74.0.3729.73',
- '75.0.3759.5',
- '74.0.3729.72',
- '73.0.3683.109',
- '75.0.3759.4',
- '75.0.3759.3',
- '74.0.3729.71',
- '75.0.3759.2',
- '74.0.3729.70',
- '73.0.3683.108',
- '74.0.3729.69',
- '75.0.3759.1',
- '75.0.3759.0',
- '74.0.3729.68',
- '73.0.3683.107',
- '74.0.3729.67',
- '75.0.3758.1',
- '75.0.3758.0',
- '74.0.3729.66',
- '73.0.3683.106',
- '74.0.3729.65',
- '75.0.3757.1',
- '75.0.3757.0',
- '74.0.3729.64',
- '73.0.3683.105',
- '74.0.3729.63',
- '75.0.3756.1',
- '75.0.3756.0',
- '74.0.3729.62',
- '73.0.3683.104',
- '75.0.3755.3',
- '75.0.3755.2',
- '73.0.3683.103',
- '75.0.3755.1',
- '75.0.3755.0',
- '74.0.3729.61',
- '73.0.3683.102',
- '74.0.3729.60',
- '75.0.3754.2',
- '74.0.3729.59',
- '75.0.3753.4',
- '74.0.3729.58',
- '75.0.3754.1',
- '75.0.3754.0',
- '74.0.3729.57',
- '73.0.3683.101',
- '75.0.3753.3',
- '75.0.3752.2',
- '75.0.3753.2',
- '74.0.3729.56',
- '75.0.3753.1',
- '75.0.3753.0',
- '74.0.3729.55',
- '73.0.3683.100',
- '74.0.3729.54',
- '75.0.3752.1',
- '75.0.3752.0',
- '74.0.3729.53',
- '73.0.3683.99',
- '74.0.3729.52',
- '75.0.3751.1',
- '75.0.3751.0',
- '74.0.3729.51',
- '73.0.3683.98',
- '74.0.3729.50',
- '75.0.3750.0',
- '74.0.3729.49',
- '74.0.3729.48',
- '74.0.3729.47',
- '75.0.3749.3',
- '74.0.3729.46',
- '73.0.3683.97',
- '75.0.3749.2',
- '74.0.3729.45',
- '75.0.3749.1',
- '75.0.3749.0',
- '74.0.3729.44',
- '73.0.3683.96',
- '74.0.3729.43',
- '74.0.3729.42',
- '75.0.3748.1',
- '75.0.3748.0',
- '74.0.3729.41',
- '75.0.3747.1',
- '73.0.3683.95',
- '75.0.3746.4',
- '74.0.3729.40',
- '74.0.3729.39',
- '75.0.3747.0',
- '75.0.3746.3',
- '75.0.3746.2',
- '74.0.3729.38',
- '75.0.3746.1',
- '75.0.3746.0',
- '74.0.3729.37',
- '73.0.3683.94',
- '75.0.3745.5',
- '75.0.3745.4',
- '75.0.3745.3',
- '75.0.3745.2',
- '74.0.3729.36',
- '75.0.3745.1',
- '75.0.3745.0',
- '75.0.3744.2',
- '74.0.3729.35',
- '73.0.3683.93',
- '74.0.3729.34',
- '75.0.3744.1',
- '75.0.3744.0',
- '74.0.3729.33',
- '73.0.3683.92',
- '74.0.3729.32',
- '74.0.3729.31',
- '73.0.3683.91',
- '75.0.3741.2',
- '75.0.3740.5',
- '74.0.3729.30',
- '75.0.3741.1',
- '75.0.3741.0',
- '74.0.3729.29',
- '75.0.3740.4',
- '73.0.3683.90',
- '74.0.3729.28',
- '75.0.3740.3',
- '73.0.3683.89',
- '75.0.3740.2',
- '74.0.3729.27',
- '75.0.3740.1',
- '75.0.3740.0',
- '74.0.3729.26',
- '73.0.3683.88',
- '73.0.3683.87',
- '74.0.3729.25',
- '75.0.3739.1',
- '75.0.3739.0',
- '73.0.3683.86',
- '74.0.3729.24',
- '73.0.3683.85',
- '75.0.3738.4',
- '75.0.3738.3',
- '75.0.3738.2',
- '75.0.3738.1',
- '75.0.3738.0',
- '74.0.3729.23',
- '73.0.3683.84',
- '74.0.3729.22',
- '74.0.3729.21',
- '75.0.3737.1',
- '75.0.3737.0',
- '74.0.3729.20',
- '73.0.3683.83',
- '74.0.3729.19',
- '75.0.3736.1',
- '75.0.3736.0',
- '74.0.3729.18',
- '73.0.3683.82',
- '74.0.3729.17',
- '75.0.3735.1',
- '75.0.3735.0',
- '74.0.3729.16',
- '73.0.3683.81',
- '75.0.3734.1',
- '75.0.3734.0',
- '74.0.3729.15',
- '73.0.3683.80',
- '74.0.3729.14',
- '75.0.3733.1',
- '75.0.3733.0',
- '75.0.3732.1',
- '74.0.3729.13',
- '74.0.3729.12',
- '73.0.3683.79',
- '74.0.3729.11',
- '75.0.3732.0',
- '74.0.3729.10',
- '73.0.3683.78',
- '74.0.3729.9',
- '74.0.3729.8',
- '74.0.3729.7',
- '75.0.3731.3',
- '75.0.3731.2',
- '75.0.3731.0',
- '74.0.3729.6',
- '73.0.3683.77',
- '73.0.3683.76',
- '75.0.3730.5',
- '75.0.3730.4',
- '73.0.3683.75',
- '74.0.3729.5',
- '73.0.3683.74',
- '75.0.3730.3',
- '75.0.3730.2',
- '74.0.3729.4',
- '73.0.3683.73',
- '73.0.3683.72',
- '75.0.3730.1',
- '75.0.3730.0',
- '74.0.3729.3',
- '73.0.3683.71',
- '74.0.3729.2',
- '73.0.3683.70',
- '74.0.3729.1',
- '74.0.3729.0',
- '74.0.3726.4',
- '73.0.3683.69',
- '74.0.3726.3',
- '74.0.3728.0',
- '74.0.3726.2',
- '73.0.3683.68',
- '74.0.3726.1',
- '74.0.3726.0',
- '74.0.3725.4',
- '73.0.3683.67',
- '73.0.3683.66',
- '74.0.3725.3',
- '74.0.3725.2',
- '74.0.3725.1',
- '74.0.3724.8',
- '74.0.3725.0',
- '73.0.3683.65',
- '74.0.3724.7',
- '74.0.3724.6',
- '74.0.3724.5',
- '74.0.3724.4',
- '74.0.3724.3',
- '74.0.3724.2',
- '74.0.3724.1',
- '74.0.3724.0',
- '73.0.3683.64',
- '74.0.3723.1',
- '74.0.3723.0',
- '73.0.3683.63',
- '74.0.3722.1',
- '74.0.3722.0',
- '73.0.3683.62',
- '74.0.3718.9',
- '74.0.3702.3',
- '74.0.3721.3',
- '74.0.3721.2',
- '74.0.3721.1',
- '74.0.3721.0',
- '74.0.3720.6',
- '73.0.3683.61',
- '72.0.3626.122',
- '73.0.3683.60',
- '74.0.3720.5',
- '72.0.3626.121',
- '74.0.3718.8',
- '74.0.3720.4',
- '74.0.3720.3',
- '74.0.3718.7',
- '74.0.3720.2',
- '74.0.3720.1',
- '74.0.3720.0',
- '74.0.3718.6',
- '74.0.3719.5',
- '73.0.3683.59',
- '74.0.3718.5',
- '74.0.3718.4',
- '74.0.3719.4',
- '74.0.3719.3',
- '74.0.3719.2',
- '74.0.3719.1',
- '73.0.3683.58',
- '74.0.3719.0',
- '73.0.3683.57',
- '73.0.3683.56',
- '74.0.3718.3',
- '73.0.3683.55',
- '74.0.3718.2',
- '74.0.3718.1',
- '74.0.3718.0',
- '73.0.3683.54',
- '74.0.3717.2',
- '73.0.3683.53',
- '74.0.3717.1',
- '74.0.3717.0',
- '73.0.3683.52',
- '74.0.3716.1',
- '74.0.3716.0',
- '73.0.3683.51',
- '74.0.3715.1',
- '74.0.3715.0',
- '73.0.3683.50',
- '74.0.3711.2',
- '74.0.3714.2',
- '74.0.3713.3',
- '74.0.3714.1',
- '74.0.3714.0',
- '73.0.3683.49',
- '74.0.3713.1',
- '74.0.3713.0',
- '72.0.3626.120',
- '73.0.3683.48',
- '74.0.3712.2',
- '74.0.3712.1',
- '74.0.3712.0',
- '73.0.3683.47',
- '72.0.3626.119',
- '73.0.3683.46',
- '74.0.3710.2',
- '72.0.3626.118',
- '74.0.3711.1',
- '74.0.3711.0',
- '73.0.3683.45',
- '72.0.3626.117',
- '74.0.3710.1',
- '74.0.3710.0',
- '73.0.3683.44',
- '72.0.3626.116',
- '74.0.3709.1',
- '74.0.3709.0',
- '74.0.3704.9',
- '73.0.3683.43',
- '72.0.3626.115',
- '74.0.3704.8',
- '74.0.3704.7',
- '74.0.3708.0',
- '74.0.3706.7',
- '74.0.3704.6',
- '73.0.3683.42',
- '72.0.3626.114',
- '74.0.3706.6',
- '72.0.3626.113',
- '74.0.3704.5',
- '74.0.3706.5',
- '74.0.3706.4',
- '74.0.3706.3',
- '74.0.3706.2',
- '74.0.3706.1',
- '74.0.3706.0',
- '73.0.3683.41',
- '72.0.3626.112',
- '74.0.3705.1',
- '74.0.3705.0',
- '73.0.3683.40',
- '72.0.3626.111',
- '73.0.3683.39',
- '74.0.3704.4',
- '73.0.3683.38',
- '74.0.3704.3',
- '74.0.3704.2',
- '74.0.3704.1',
- '74.0.3704.0',
- '73.0.3683.37',
- '72.0.3626.110',
- '72.0.3626.109',
- '74.0.3703.3',
- '74.0.3703.2',
- '73.0.3683.36',
- '74.0.3703.1',
- '74.0.3703.0',
- '73.0.3683.35',
- '72.0.3626.108',
- '74.0.3702.2',
- '74.0.3699.3',
- '74.0.3702.1',
- '74.0.3702.0',
- '73.0.3683.34',
- '72.0.3626.107',
- '73.0.3683.33',
- '74.0.3701.1',
- '74.0.3701.0',
- '73.0.3683.32',
- '73.0.3683.31',
- '72.0.3626.105',
- '74.0.3700.1',
- '74.0.3700.0',
- '73.0.3683.29',
- '72.0.3626.103',
- '74.0.3699.2',
- '74.0.3699.1',
- '74.0.3699.0',
- '73.0.3683.28',
- '72.0.3626.102',
- '73.0.3683.27',
- '73.0.3683.26',
- '74.0.3698.0',
- '74.0.3696.2',
- '72.0.3626.101',
- '73.0.3683.25',
- '74.0.3696.1',
- '74.0.3696.0',
- '74.0.3694.8',
- '72.0.3626.100',
- '74.0.3694.7',
- '74.0.3694.6',
- '74.0.3694.5',
- '74.0.3694.4',
- '72.0.3626.99',
- '72.0.3626.98',
- '74.0.3694.3',
- '73.0.3683.24',
- '72.0.3626.97',
- '72.0.3626.96',
- '72.0.3626.95',
- '73.0.3683.23',
- '72.0.3626.94',
- '73.0.3683.22',
- '73.0.3683.21',
- '72.0.3626.93',
- '74.0.3694.2',
- '72.0.3626.92',
- '74.0.3694.1',
- '74.0.3694.0',
- '74.0.3693.6',
- '73.0.3683.20',
- '72.0.3626.91',
- '74.0.3693.5',
- '74.0.3693.4',
- '74.0.3693.3',
- '74.0.3693.2',
- '73.0.3683.19',
- '74.0.3693.1',
- '74.0.3693.0',
- '73.0.3683.18',
- '72.0.3626.90',
- '74.0.3692.1',
- '74.0.3692.0',
- '73.0.3683.17',
- '72.0.3626.89',
- '74.0.3687.3',
- '74.0.3691.1',
- '74.0.3691.0',
- '73.0.3683.16',
- '72.0.3626.88',
- '72.0.3626.87',
- '73.0.3683.15',
- '74.0.3690.1',
- '74.0.3690.0',
- '73.0.3683.14',
- '72.0.3626.86',
- '73.0.3683.13',
- '73.0.3683.12',
- '74.0.3689.1',
- '74.0.3689.0',
- '73.0.3683.11',
- '72.0.3626.85',
- '73.0.3683.10',
- '72.0.3626.84',
- '73.0.3683.9',
- '74.0.3688.1',
- '74.0.3688.0',
- '73.0.3683.8',
- '72.0.3626.83',
- '74.0.3687.2',
- '74.0.3687.1',
- '74.0.3687.0',
- '73.0.3683.7',
- '72.0.3626.82',
- '74.0.3686.4',
- '72.0.3626.81',
- '74.0.3686.3',
- '74.0.3686.2',
- '74.0.3686.1',
- '74.0.3686.0',
- '73.0.3683.6',
- '72.0.3626.80',
- '74.0.3685.1',
- '74.0.3685.0',
- '73.0.3683.5',
- '72.0.3626.79',
- '74.0.3684.1',
- '74.0.3684.0',
- '73.0.3683.4',
- '72.0.3626.78',
- '72.0.3626.77',
- '73.0.3683.3',
- '73.0.3683.2',
- '72.0.3626.76',
- '73.0.3683.1',
- '73.0.3683.0',
- '72.0.3626.75',
- '71.0.3578.141',
- '73.0.3682.1',
- '73.0.3682.0',
- '72.0.3626.74',
- '71.0.3578.140',
- '73.0.3681.4',
- '73.0.3681.3',
- '73.0.3681.2',
- '73.0.3681.1',
- '73.0.3681.0',
- '72.0.3626.73',
- '71.0.3578.139',
- '72.0.3626.72',
- '72.0.3626.71',
- '73.0.3680.1',
- '73.0.3680.0',
- '72.0.3626.70',
- '71.0.3578.138',
- '73.0.3678.2',
- '73.0.3679.1',
- '73.0.3679.0',
- '72.0.3626.69',
- '71.0.3578.137',
- '73.0.3678.1',
- '73.0.3678.0',
- '71.0.3578.136',
- '73.0.3677.1',
- '73.0.3677.0',
- '72.0.3626.68',
- '72.0.3626.67',
- '71.0.3578.135',
- '73.0.3676.1',
- '73.0.3676.0',
- '73.0.3674.2',
- '72.0.3626.66',
- '71.0.3578.134',
- '73.0.3674.1',
- '73.0.3674.0',
- '72.0.3626.65',
- '71.0.3578.133',
- '73.0.3673.2',
- '73.0.3673.1',
- '73.0.3673.0',
- '72.0.3626.64',
- '71.0.3578.132',
- '72.0.3626.63',
- '72.0.3626.62',
- '72.0.3626.61',
- '72.0.3626.60',
- '73.0.3672.1',
- '73.0.3672.0',
- '72.0.3626.59',
- '71.0.3578.131',
- '73.0.3671.3',
- '73.0.3671.2',
- '73.0.3671.1',
- '73.0.3671.0',
- '72.0.3626.58',
- '71.0.3578.130',
- '73.0.3670.1',
- '73.0.3670.0',
- '72.0.3626.57',
- '71.0.3578.129',
- '73.0.3669.1',
- '73.0.3669.0',
- '72.0.3626.56',
- '71.0.3578.128',
- '73.0.3668.2',
- '73.0.3668.1',
- '73.0.3668.0',
- '72.0.3626.55',
- '71.0.3578.127',
- '73.0.3667.2',
- '73.0.3667.1',
- '73.0.3667.0',
- '72.0.3626.54',
- '71.0.3578.126',
- '73.0.3666.1',
- '73.0.3666.0',
- '72.0.3626.53',
- '71.0.3578.125',
- '73.0.3665.4',
- '73.0.3665.3',
- '72.0.3626.52',
- '73.0.3665.2',
- '73.0.3664.4',
- '73.0.3665.1',
- '73.0.3665.0',
- '72.0.3626.51',
- '71.0.3578.124',
- '72.0.3626.50',
- '73.0.3664.3',
- '73.0.3664.2',
- '73.0.3664.1',
- '73.0.3664.0',
- '73.0.3663.2',
- '72.0.3626.49',
- '71.0.3578.123',
- '73.0.3663.1',
- '73.0.3663.0',
- '72.0.3626.48',
- '71.0.3578.122',
- '73.0.3662.1',
- '73.0.3662.0',
- '72.0.3626.47',
- '71.0.3578.121',
- '73.0.3661.1',
- '72.0.3626.46',
- '73.0.3661.0',
- '72.0.3626.45',
- '71.0.3578.120',
- '73.0.3660.2',
- '73.0.3660.1',
- '73.0.3660.0',
- '72.0.3626.44',
- '71.0.3578.119',
- '73.0.3659.1',
- '73.0.3659.0',
- '72.0.3626.43',
- '71.0.3578.118',
- '73.0.3658.1',
- '73.0.3658.0',
- '72.0.3626.42',
- '71.0.3578.117',
- '73.0.3657.1',
- '73.0.3657.0',
- '72.0.3626.41',
- '71.0.3578.116',
- '73.0.3656.1',
- '73.0.3656.0',
- '72.0.3626.40',
- '71.0.3578.115',
- '73.0.3655.1',
- '73.0.3655.0',
- '72.0.3626.39',
- '71.0.3578.114',
- '73.0.3654.1',
- '73.0.3654.0',
- '72.0.3626.38',
- '71.0.3578.113',
- '73.0.3653.1',
- '73.0.3653.0',
- '72.0.3626.37',
- '71.0.3578.112',
- '73.0.3652.1',
- '73.0.3652.0',
- '72.0.3626.36',
- '71.0.3578.111',
- '73.0.3651.1',
- '73.0.3651.0',
- '72.0.3626.35',
- '71.0.3578.110',
- '73.0.3650.1',
- '73.0.3650.0',
- '72.0.3626.34',
- '71.0.3578.109',
- '73.0.3649.1',
- '73.0.3649.0',
- '72.0.3626.33',
- '71.0.3578.108',
- '73.0.3648.2',
- '73.0.3648.1',
- '73.0.3648.0',
- '72.0.3626.32',
- '71.0.3578.107',
- '73.0.3647.2',
- '73.0.3647.1',
- '73.0.3647.0',
- '72.0.3626.31',
- '71.0.3578.106',
- '73.0.3635.3',
- '73.0.3646.2',
- '73.0.3646.1',
- '73.0.3646.0',
- '72.0.3626.30',
- '71.0.3578.105',
- '72.0.3626.29',
- '73.0.3645.2',
- '73.0.3645.1',
- '73.0.3645.0',
- '72.0.3626.28',
- '71.0.3578.104',
- '72.0.3626.27',
- '72.0.3626.26',
- '72.0.3626.25',
- '72.0.3626.24',
- '73.0.3644.0',
- '73.0.3643.2',
- '72.0.3626.23',
- '71.0.3578.103',
- '73.0.3643.1',
- '73.0.3643.0',
- '72.0.3626.22',
- '71.0.3578.102',
- '73.0.3642.1',
- '73.0.3642.0',
- '72.0.3626.21',
- '71.0.3578.101',
- '73.0.3641.1',
- '73.0.3641.0',
- '72.0.3626.20',
- '71.0.3578.100',
- '72.0.3626.19',
- '73.0.3640.1',
- '73.0.3640.0',
- '72.0.3626.18',
- '73.0.3639.1',
- '71.0.3578.99',
- '73.0.3639.0',
- '72.0.3626.17',
- '73.0.3638.2',
- '72.0.3626.16',
- '73.0.3638.1',
- '73.0.3638.0',
- '72.0.3626.15',
- '71.0.3578.98',
- '73.0.3635.2',
- '71.0.3578.97',
- '73.0.3637.1',
- '73.0.3637.0',
- '72.0.3626.14',
- '71.0.3578.96',
- '71.0.3578.95',
- '72.0.3626.13',
- '71.0.3578.94',
- '73.0.3636.2',
- '71.0.3578.93',
- '73.0.3636.1',
- '73.0.3636.0',
- '72.0.3626.12',
- '71.0.3578.92',
- '73.0.3635.1',
- '73.0.3635.0',
- '72.0.3626.11',
- '71.0.3578.91',
- '73.0.3634.2',
- '73.0.3634.1',
- '73.0.3634.0',
- '72.0.3626.10',
- '71.0.3578.90',
- '71.0.3578.89',
- '73.0.3633.2',
- '73.0.3633.1',
- '73.0.3633.0',
- '72.0.3610.4',
- '72.0.3626.9',
- '71.0.3578.88',
- '73.0.3632.5',
- '73.0.3632.4',
- '73.0.3632.3',
- '73.0.3632.2',
- '73.0.3632.1',
- '73.0.3632.0',
- '72.0.3626.8',
- '71.0.3578.87',
- '73.0.3631.2',
- '73.0.3631.1',
- '73.0.3631.0',
- '72.0.3626.7',
- '71.0.3578.86',
- '72.0.3626.6',
- '73.0.3630.1',
- '73.0.3630.0',
- '72.0.3626.5',
- '71.0.3578.85',
- '72.0.3626.4',
- '73.0.3628.3',
- '73.0.3628.2',
- '73.0.3629.1',
- '73.0.3629.0',
- '72.0.3626.3',
- '71.0.3578.84',
- '73.0.3628.1',
- '73.0.3628.0',
- '71.0.3578.83',
- '73.0.3627.1',
- '73.0.3627.0',
- '72.0.3626.2',
- '71.0.3578.82',
- '71.0.3578.81',
- '71.0.3578.80',
- '72.0.3626.1',
- '72.0.3626.0',
- '71.0.3578.79',
- '70.0.3538.124',
- '71.0.3578.78',
- '72.0.3623.4',
- '72.0.3625.2',
- '72.0.3625.1',
- '72.0.3625.0',
- '71.0.3578.77',
- '70.0.3538.123',
- '72.0.3624.4',
- '72.0.3624.3',
- '72.0.3624.2',
- '71.0.3578.76',
- '72.0.3624.1',
- '72.0.3624.0',
- '72.0.3623.3',
- '71.0.3578.75',
- '70.0.3538.122',
- '71.0.3578.74',
- '72.0.3623.2',
- '72.0.3610.3',
- '72.0.3623.1',
- '72.0.3623.0',
- '72.0.3622.3',
- '72.0.3622.2',
- '71.0.3578.73',
- '70.0.3538.121',
- '72.0.3622.1',
- '72.0.3622.0',
- '71.0.3578.72',
- '70.0.3538.120',
- '72.0.3621.1',
- '72.0.3621.0',
- '71.0.3578.71',
- '70.0.3538.119',
- '72.0.3620.1',
- '72.0.3620.0',
- '71.0.3578.70',
- '70.0.3538.118',
- '71.0.3578.69',
- '72.0.3619.1',
- '72.0.3619.0',
- '71.0.3578.68',
- '70.0.3538.117',
- '71.0.3578.67',
- '72.0.3618.1',
- '72.0.3618.0',
- '71.0.3578.66',
- '70.0.3538.116',
- '72.0.3617.1',
- '72.0.3617.0',
- '71.0.3578.65',
- '70.0.3538.115',
- '72.0.3602.3',
- '71.0.3578.64',
- '72.0.3616.1',
- '72.0.3616.0',
- '71.0.3578.63',
- '70.0.3538.114',
- '71.0.3578.62',
- '72.0.3615.1',
- '72.0.3615.0',
- '71.0.3578.61',
- '70.0.3538.113',
- '72.0.3614.1',
- '72.0.3614.0',
- '71.0.3578.60',
- '70.0.3538.112',
- '72.0.3613.1',
- '72.0.3613.0',
- '71.0.3578.59',
- '70.0.3538.111',
- '72.0.3612.2',
- '72.0.3612.1',
- '72.0.3612.0',
- '70.0.3538.110',
- '71.0.3578.58',
- '70.0.3538.109',
- '72.0.3611.2',
- '72.0.3611.1',
- '72.0.3611.0',
- '71.0.3578.57',
- '70.0.3538.108',
- '72.0.3610.2',
- '71.0.3578.56',
- '71.0.3578.55',
- '72.0.3610.1',
- '72.0.3610.0',
- '71.0.3578.54',
- '70.0.3538.107',
- '71.0.3578.53',
- '72.0.3609.3',
- '71.0.3578.52',
- '72.0.3609.2',
- '71.0.3578.51',
- '72.0.3608.5',
- '72.0.3609.1',
- '72.0.3609.0',
- '71.0.3578.50',
- '70.0.3538.106',
- '72.0.3608.4',
- '72.0.3608.3',
- '72.0.3608.2',
- '71.0.3578.49',
- '72.0.3608.1',
- '72.0.3608.0',
- '70.0.3538.105',
- '71.0.3578.48',
- '72.0.3607.1',
- '72.0.3607.0',
- '71.0.3578.47',
- '70.0.3538.104',
- '72.0.3606.2',
- '72.0.3606.1',
- '72.0.3606.0',
- '71.0.3578.46',
- '70.0.3538.103',
- '70.0.3538.102',
- '72.0.3605.3',
- '72.0.3605.2',
- '72.0.3605.1',
- '72.0.3605.0',
- '71.0.3578.45',
- '70.0.3538.101',
- '71.0.3578.44',
- '71.0.3578.43',
- '70.0.3538.100',
- '70.0.3538.99',
- '71.0.3578.42',
- '72.0.3604.1',
- '72.0.3604.0',
- '71.0.3578.41',
- '70.0.3538.98',
- '71.0.3578.40',
- '72.0.3603.2',
- '72.0.3603.1',
- '72.0.3603.0',
- '71.0.3578.39',
- '70.0.3538.97',
- '72.0.3602.2',
- '71.0.3578.38',
- '71.0.3578.37',
- '72.0.3602.1',
- '72.0.3602.0',
- '71.0.3578.36',
- '70.0.3538.96',
- '72.0.3601.1',
- '72.0.3601.0',
- '71.0.3578.35',
- '70.0.3538.95',
- '72.0.3600.1',
- '72.0.3600.0',
- '71.0.3578.34',
- '70.0.3538.94',
- '72.0.3599.3',
- '72.0.3599.2',
- '72.0.3599.1',
- '72.0.3599.0',
- '71.0.3578.33',
- '70.0.3538.93',
- '72.0.3598.1',
- '72.0.3598.0',
- '71.0.3578.32',
- '70.0.3538.87',
- '72.0.3597.1',
- '72.0.3597.0',
- '72.0.3596.2',
- '71.0.3578.31',
- '70.0.3538.86',
- '71.0.3578.30',
- '71.0.3578.29',
- '72.0.3596.1',
- '72.0.3596.0',
- '71.0.3578.28',
- '70.0.3538.85',
- '72.0.3595.2',
- '72.0.3591.3',
- '72.0.3595.1',
- '72.0.3595.0',
- '71.0.3578.27',
- '70.0.3538.84',
- '72.0.3594.1',
- '72.0.3594.0',
- '71.0.3578.26',
- '70.0.3538.83',
- '72.0.3593.2',
- '72.0.3593.1',
- '72.0.3593.0',
- '71.0.3578.25',
- '70.0.3538.82',
- '72.0.3589.3',
- '72.0.3592.2',
- '72.0.3592.1',
- '72.0.3592.0',
- '71.0.3578.24',
- '72.0.3589.2',
- '70.0.3538.81',
- '70.0.3538.80',
- '72.0.3591.2',
- '72.0.3591.1',
- '72.0.3591.0',
- '71.0.3578.23',
- '70.0.3538.79',
- '71.0.3578.22',
- '72.0.3590.1',
- '72.0.3590.0',
- '71.0.3578.21',
- '70.0.3538.78',
- '70.0.3538.77',
- '72.0.3589.1',
- '72.0.3589.0',
- '71.0.3578.20',
- '70.0.3538.76',
- '71.0.3578.19',
- '70.0.3538.75',
- '72.0.3588.1',
- '72.0.3588.0',
- '71.0.3578.18',
- '70.0.3538.74',
- '72.0.3586.2',
- '72.0.3587.0',
- '71.0.3578.17',
- '70.0.3538.73',
- '72.0.3586.1',
- '72.0.3586.0',
- '71.0.3578.16',
- '70.0.3538.72',
- '72.0.3585.1',
- '72.0.3585.0',
- '71.0.3578.15',
- '70.0.3538.71',
- '71.0.3578.14',
- '72.0.3584.1',
- '72.0.3584.0',
- '71.0.3578.13',
- '70.0.3538.70',
- '72.0.3583.2',
- '71.0.3578.12',
- '72.0.3583.1',
- '72.0.3583.0',
- '71.0.3578.11',
- '70.0.3538.69',
- '71.0.3578.10',
- '72.0.3582.0',
- '72.0.3581.4',
- '71.0.3578.9',
- '70.0.3538.67',
- '72.0.3581.3',
- '72.0.3581.2',
- '72.0.3581.1',
- '72.0.3581.0',
- '71.0.3578.8',
- '70.0.3538.66',
- '72.0.3580.1',
- '72.0.3580.0',
- '71.0.3578.7',
- '70.0.3538.65',
- '71.0.3578.6',
- '72.0.3579.1',
- '72.0.3579.0',
- '71.0.3578.5',
- '70.0.3538.64',
- '71.0.3578.4',
- '71.0.3578.3',
- '71.0.3578.2',
- '71.0.3578.1',
- '71.0.3578.0',
- '70.0.3538.63',
- '69.0.3497.128',
- '70.0.3538.62',
- '70.0.3538.61',
- '70.0.3538.60',
- '70.0.3538.59',
- '71.0.3577.1',
- '71.0.3577.0',
- '70.0.3538.58',
- '69.0.3497.127',
- '71.0.3576.2',
- '71.0.3576.1',
- '71.0.3576.0',
- '70.0.3538.57',
- '70.0.3538.56',
- '71.0.3575.2',
- '70.0.3538.55',
- '69.0.3497.126',
- '70.0.3538.54',
- '71.0.3575.1',
- '71.0.3575.0',
- '71.0.3574.1',
- '71.0.3574.0',
- '70.0.3538.53',
- '69.0.3497.125',
- '70.0.3538.52',
- '71.0.3573.1',
- '71.0.3573.0',
- '70.0.3538.51',
- '69.0.3497.124',
- '71.0.3572.1',
- '71.0.3572.0',
- '70.0.3538.50',
- '69.0.3497.123',
- '71.0.3571.2',
- '70.0.3538.49',
- '69.0.3497.122',
- '71.0.3571.1',
- '71.0.3571.0',
- '70.0.3538.48',
- '69.0.3497.121',
- '71.0.3570.1',
- '71.0.3570.0',
- '70.0.3538.47',
- '69.0.3497.120',
- '71.0.3568.2',
- '71.0.3569.1',
- '71.0.3569.0',
- '70.0.3538.46',
- '69.0.3497.119',
- '70.0.3538.45',
- '71.0.3568.1',
- '71.0.3568.0',
- '70.0.3538.44',
- '69.0.3497.118',
- '70.0.3538.43',
- '70.0.3538.42',
- '71.0.3567.1',
- '71.0.3567.0',
- '70.0.3538.41',
- '69.0.3497.117',
- '71.0.3566.1',
- '71.0.3566.0',
- '70.0.3538.40',
- '69.0.3497.116',
- '71.0.3565.1',
- '71.0.3565.0',
- '70.0.3538.39',
- '69.0.3497.115',
- '71.0.3564.1',
- '71.0.3564.0',
- '70.0.3538.38',
- '69.0.3497.114',
- '71.0.3563.0',
- '71.0.3562.2',
- '70.0.3538.37',
- '69.0.3497.113',
- '70.0.3538.36',
- '70.0.3538.35',
- '71.0.3562.1',
- '71.0.3562.0',
- '70.0.3538.34',
- '69.0.3497.112',
- '70.0.3538.33',
- '71.0.3561.1',
- '71.0.3561.0',
- '70.0.3538.32',
- '69.0.3497.111',
- '71.0.3559.6',
- '71.0.3560.1',
- '71.0.3560.0',
- '71.0.3559.5',
- '71.0.3559.4',
- '70.0.3538.31',
- '69.0.3497.110',
- '71.0.3559.3',
- '70.0.3538.30',
- '69.0.3497.109',
- '71.0.3559.2',
- '71.0.3559.1',
- '71.0.3559.0',
- '70.0.3538.29',
- '69.0.3497.108',
- '71.0.3558.2',
- '71.0.3558.1',
- '71.0.3558.0',
- '70.0.3538.28',
- '69.0.3497.107',
- '71.0.3557.2',
- '71.0.3557.1',
- '71.0.3557.0',
- '70.0.3538.27',
- '69.0.3497.106',
- '71.0.3554.4',
- '70.0.3538.26',
- '71.0.3556.1',
- '71.0.3556.0',
- '70.0.3538.25',
- '71.0.3554.3',
- '69.0.3497.105',
- '71.0.3554.2',
- '70.0.3538.24',
- '69.0.3497.104',
- '71.0.3555.2',
- '70.0.3538.23',
- '71.0.3555.1',
- '71.0.3555.0',
- '70.0.3538.22',
- '69.0.3497.103',
- '71.0.3554.1',
- '71.0.3554.0',
- '70.0.3538.21',
- '69.0.3497.102',
- '71.0.3553.3',
- '70.0.3538.20',
- '69.0.3497.101',
- '71.0.3553.2',
- '69.0.3497.100',
- '71.0.3553.1',
- '71.0.3553.0',
- '70.0.3538.19',
- '69.0.3497.99',
- '69.0.3497.98',
- '69.0.3497.97',
- '71.0.3552.6',
- '71.0.3552.5',
- '71.0.3552.4',
- '71.0.3552.3',
- '71.0.3552.2',
- '71.0.3552.1',
- '71.0.3552.0',
- '70.0.3538.18',
- '69.0.3497.96',
- '71.0.3551.3',
- '71.0.3551.2',
- '71.0.3551.1',
- '71.0.3551.0',
- '70.0.3538.17',
- '69.0.3497.95',
- '71.0.3550.3',
- '71.0.3550.2',
- '71.0.3550.1',
- '71.0.3550.0',
- '70.0.3538.16',
- '69.0.3497.94',
- '71.0.3549.1',
- '71.0.3549.0',
- '70.0.3538.15',
- '69.0.3497.93',
- '69.0.3497.92',
- '71.0.3548.1',
- '71.0.3548.0',
- '70.0.3538.14',
- '69.0.3497.91',
- '71.0.3547.1',
- '71.0.3547.0',
- '70.0.3538.13',
- '69.0.3497.90',
- '71.0.3546.2',
- '69.0.3497.89',
- '71.0.3546.1',
- '71.0.3546.0',
- '70.0.3538.12',
- '69.0.3497.88',
- '71.0.3545.4',
- '71.0.3545.3',
- '71.0.3545.2',
- '71.0.3545.1',
- '71.0.3545.0',
- '70.0.3538.11',
- '69.0.3497.87',
- '71.0.3544.5',
- '71.0.3544.4',
- '71.0.3544.3',
- '71.0.3544.2',
- '71.0.3544.1',
- '71.0.3544.0',
- '69.0.3497.86',
- '70.0.3538.10',
- '69.0.3497.85',
- '70.0.3538.9',
- '69.0.3497.84',
- '71.0.3543.4',
- '70.0.3538.8',
- '71.0.3543.3',
- '71.0.3543.2',
- '71.0.3543.1',
- '71.0.3543.0',
- '70.0.3538.7',
- '69.0.3497.83',
- '71.0.3542.2',
- '71.0.3542.1',
- '71.0.3542.0',
- '70.0.3538.6',
- '69.0.3497.82',
- '69.0.3497.81',
- '71.0.3541.1',
- '71.0.3541.0',
- '70.0.3538.5',
- '69.0.3497.80',
- '71.0.3540.1',
- '71.0.3540.0',
- '70.0.3538.4',
- '69.0.3497.79',
- '70.0.3538.3',
- '71.0.3539.1',
- '71.0.3539.0',
- '69.0.3497.78',
- '68.0.3440.134',
- '69.0.3497.77',
- '70.0.3538.2',
- '70.0.3538.1',
- '70.0.3538.0',
- '69.0.3497.76',
- '68.0.3440.133',
- '69.0.3497.75',
- '70.0.3537.2',
- '70.0.3537.1',
- '70.0.3537.0',
- '69.0.3497.74',
- '68.0.3440.132',
- '70.0.3536.0',
- '70.0.3535.5',
- '70.0.3535.4',
- '70.0.3535.3',
- '69.0.3497.73',
- '68.0.3440.131',
- '70.0.3532.8',
- '70.0.3532.7',
- '69.0.3497.72',
- '69.0.3497.71',
- '70.0.3535.2',
- '70.0.3535.1',
- '70.0.3535.0',
- '69.0.3497.70',
- '68.0.3440.130',
- '69.0.3497.69',
- '68.0.3440.129',
- '70.0.3534.4',
- '70.0.3534.3',
- '70.0.3534.2',
- '70.0.3534.1',
- '70.0.3534.0',
- '69.0.3497.68',
- '68.0.3440.128',
- '70.0.3533.2',
- '70.0.3533.1',
- '70.0.3533.0',
- '69.0.3497.67',
- '68.0.3440.127',
- '70.0.3532.6',
- '70.0.3532.5',
- '70.0.3532.4',
- '69.0.3497.66',
- '68.0.3440.126',
- '70.0.3532.3',
- '70.0.3532.2',
- '70.0.3532.1',
- '69.0.3497.60',
- '69.0.3497.65',
- '69.0.3497.64',
- '70.0.3532.0',
- '70.0.3531.0',
- '70.0.3530.4',
- '70.0.3530.3',
- '70.0.3530.2',
- '69.0.3497.58',
- '68.0.3440.125',
- '69.0.3497.57',
- '69.0.3497.56',
- '69.0.3497.55',
- '69.0.3497.54',
- '70.0.3530.1',
- '70.0.3530.0',
- '69.0.3497.53',
- '68.0.3440.124',
- '69.0.3497.52',
- '70.0.3529.3',
- '70.0.3529.2',
- '70.0.3529.1',
- '70.0.3529.0',
- '69.0.3497.51',
- '70.0.3528.4',
- '68.0.3440.123',
- '70.0.3528.3',
- '70.0.3528.2',
- '70.0.3528.1',
- '70.0.3528.0',
- '69.0.3497.50',
- '68.0.3440.122',
- '70.0.3527.1',
- '70.0.3527.0',
- '69.0.3497.49',
- '68.0.3440.121',
- '70.0.3526.1',
- '70.0.3526.0',
- '68.0.3440.120',
- '69.0.3497.48',
- '69.0.3497.47',
- '68.0.3440.119',
- '68.0.3440.118',
- '70.0.3525.5',
- '70.0.3525.4',
- '70.0.3525.3',
- '68.0.3440.117',
- '69.0.3497.46',
- '70.0.3525.2',
- '70.0.3525.1',
- '70.0.3525.0',
- '69.0.3497.45',
- '68.0.3440.116',
- '70.0.3524.4',
- '70.0.3524.3',
- '69.0.3497.44',
- '70.0.3524.2',
- '70.0.3524.1',
- '70.0.3524.0',
- '70.0.3523.2',
- '69.0.3497.43',
- '68.0.3440.115',
- '70.0.3505.9',
- '69.0.3497.42',
- '70.0.3505.8',
- '70.0.3523.1',
- '70.0.3523.0',
- '69.0.3497.41',
- '68.0.3440.114',
- '70.0.3505.7',
- '69.0.3497.40',
- '70.0.3522.1',
- '70.0.3522.0',
- '70.0.3521.2',
- '69.0.3497.39',
- '68.0.3440.113',
- '70.0.3505.6',
- '70.0.3521.1',
- '70.0.3521.0',
- '69.0.3497.38',
- '68.0.3440.112',
- '70.0.3520.1',
- '70.0.3520.0',
- '69.0.3497.37',
- '68.0.3440.111',
- '70.0.3519.3',
- '70.0.3519.2',
- '70.0.3519.1',
- '70.0.3519.0',
- '69.0.3497.36',
- '68.0.3440.110',
- '70.0.3518.1',
- '70.0.3518.0',
- '69.0.3497.35',
- '69.0.3497.34',
- '68.0.3440.109',
- '70.0.3517.1',
- '70.0.3517.0',
- '69.0.3497.33',
- '68.0.3440.108',
- '69.0.3497.32',
- '70.0.3516.3',
- '70.0.3516.2',
- '70.0.3516.1',
- '70.0.3516.0',
- '69.0.3497.31',
- '68.0.3440.107',
- '70.0.3515.4',
- '68.0.3440.106',
- '70.0.3515.3',
- '70.0.3515.2',
- '70.0.3515.1',
- '70.0.3515.0',
- '69.0.3497.30',
- '68.0.3440.105',
- '68.0.3440.104',
- '70.0.3514.2',
- '70.0.3514.1',
- '70.0.3514.0',
- '69.0.3497.29',
- '68.0.3440.103',
- '70.0.3513.1',
- '70.0.3513.0',
- '69.0.3497.28',
- )
- return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
- std_headers = {
- 'User-Agent': random_user_agent(),
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
- 'Accept-Language': 'en-us,en;q=0.5',
- }
- USER_AGENTS = {
- 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
- }
- NO_DEFAULT = object()
- IDENTITY = lambda x: x
- ENGLISH_MONTH_NAMES = [
- 'January', 'February', 'March', 'April', 'May', 'June',
- 'July', 'August', 'September', 'October', 'November', 'December']
- MONTH_NAMES = {
- 'en': ENGLISH_MONTH_NAMES,
- 'fr': [
- 'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
- 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
- }
- # Timezone names for RFC2822 obs-zone
- # From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
- TIMEZONE_NAMES = {
- 'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
- 'AST': -4, 'ADT': -3, # Atlantic (used in Canada)
- 'EST': -5, 'EDT': -4, # Eastern
- 'CST': -6, 'CDT': -5, # Central
- 'MST': -7, 'MDT': -6, # Mountain
- 'PST': -8, 'PDT': -7 # Pacific
- }
- KNOWN_EXTENSIONS = (
- 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
- 'flv', 'f4v', 'f4a', 'f4b',
- 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
- 'mkv', 'mka', 'mk3d',
- 'avi', 'divx',
- 'mov',
- 'asf', 'wmv', 'wma',
- '3gp', '3g2',
- 'mp3',
- 'flac',
- 'ape',
- 'wav',
- 'f4f', 'f4m', 'm3u8', 'smil')
- # needed for sanitizing filenames in restricted mode
- ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
- itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
- 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
- DATE_FORMATS = (
- '%d %B %Y',
- '%d %b %Y',
- '%B %d %Y',
- '%B %dst %Y',
- '%B %dnd %Y',
- '%B %drd %Y',
- '%B %dth %Y',
- '%b %d %Y',
- '%b %dst %Y',
- '%b %dnd %Y',
- '%b %drd %Y',
- '%b %dth %Y',
- '%b %dst %Y %I:%M',
- '%b %dnd %Y %I:%M',
- '%b %drd %Y %I:%M',
- '%b %dth %Y %I:%M',
- '%Y %m %d',
- '%Y-%m-%d',
- '%Y.%m.%d.',
- '%Y/%m/%d',
- '%Y/%m/%d %H:%M',
- '%Y/%m/%d %H:%M:%S',
- '%Y%m%d%H%M',
- '%Y%m%d%H%M%S',
- '%Y%m%d',
- '%Y-%m-%d %H:%M',
- '%Y-%m-%d %H:%M:%S',
- '%Y-%m-%d %H:%M:%S.%f',
- '%Y-%m-%d %H:%M:%S:%f',
- '%d.%m.%Y %H:%M',
- '%d.%m.%Y %H.%M',
- '%Y-%m-%dT%H:%M:%SZ',
- '%Y-%m-%dT%H:%M:%S.%fZ',
- '%Y-%m-%dT%H:%M:%S.%f0Z',
- '%Y-%m-%dT%H:%M:%S',
- '%Y-%m-%dT%H:%M:%S.%f',
- '%Y-%m-%dT%H:%M',
- '%b %d %Y at %H:%M',
- '%b %d %Y at %H:%M:%S',
- '%B %d %Y at %H:%M',
- '%B %d %Y at %H:%M:%S',
- '%H:%M %d-%b-%Y',
- )
- DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
- DATE_FORMATS_DAY_FIRST.extend([
- '%d-%m-%Y',
- '%d.%m.%Y',
- '%d.%m.%y',
- '%d/%m/%Y',
- '%d/%m/%y',
- '%d/%m/%Y %H:%M:%S',
- '%d-%m-%Y %H:%M',
- ])
- DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
- DATE_FORMATS_MONTH_FIRST.extend([
- '%m-%d-%Y',
- '%m.%d.%Y',
- '%m/%d/%Y',
- '%m/%d/%y',
- '%m/%d/%Y %H:%M:%S',
- ])
- PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
- JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
- def preferredencoding():
- """Get preferred encoding.
- Returns the best encoding scheme for the system, based on
- locale.getpreferredencoding() and some further tweaks.
- """
- try:
- pref = locale.getpreferredencoding()
- 'TEST'.encode(pref)
- except Exception:
- pref = 'UTF-8'
- return pref
- def write_json_file(obj, fn):
- """ Encode obj as JSON and write it to fn, atomically if possible """
- fn = encodeFilename(fn)
- if sys.version_info < (3, 0) and sys.platform != 'win32':
- encoding = get_filesystem_encoding()
- # os.path.basename returns a bytes object, but NamedTemporaryFile
- # will fail if the filename contains non-ascii characters unless we
- # use a unicode object
- path_basename = lambda f: os.path.basename(f).decode(encoding)
- # the same for os.path.dirname
- path_dirname = lambda f: os.path.dirname(f).decode(encoding)
- else:
- path_basename = os.path.basename
- path_dirname = os.path.dirname
- args = {
- 'suffix': '.tmp',
- 'prefix': path_basename(fn) + '.',
- 'dir': path_dirname(fn),
- 'delete': False,
- }
- # In Python 2.x, json.dump expects a bytestream.
- # In Python 3.x, it writes to a character stream
- if sys.version_info < (3, 0):
- args['mode'] = 'wb'
- else:
- args.update({
- 'mode': 'w',
- 'encoding': 'utf-8',
- })
- tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
- try:
- with tf:
- json.dump(obj, tf)
- if sys.platform == 'win32':
- # Need to remove existing file on Windows, else os.rename raises
- # WindowsError or FileExistsError.
- try:
- os.unlink(fn)
- except OSError:
- pass
- try:
- mask = os.umask(0)
- os.umask(mask)
- os.chmod(tf.name, 0o666 & ~mask)
- except OSError:
- pass
- os.rename(tf.name, fn)
- except Exception:
- try:
- os.remove(tf.name)
- except OSError:
- pass
- raise
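- # Usage sketch (path hypothetical):
- # >>> write_json_file({'id': 'abc'}, '/tmp/info.json')
- # The object is serialized into a NamedTemporaryFile next to the target and
- # os.rename()d over it, so readers never observe a half-written file; the
- # umask save/restore above only reads the current umask so the temporary
- # file gets the usual 0o666 & ~umask permissions.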
- if sys.version_info >= (2, 7):
- def find_xpath_attr(node, xpath, key, val=None):
- """ Find the xpath xpath[@key=val] """
- assert re.match(r'^[a-zA-Z_-]+$', key)
- expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
- return node.find(expr)
- else:
- def find_xpath_attr(node, xpath, key, val=None):
- for f in node.findall(compat_xpath(xpath)):
- if key not in f.attrib:
- continue
- if val is None or f.attrib.get(key) == val:
- return f
- return None
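- # Sketch (toy document):
- # >>> doc = compat_etree_fromstring('<root><media id="x"/><media id="y"/></root>')
- # >>> find_xpath_attr(doc, './/media', 'id', 'y').attrib['id']
- # 'y'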
- # On Python 2.6, the xml.etree.ElementTree.Element methods don't support
- # the namespace parameter
- def xpath_with_ns(path, ns_map):
- components = [c.split(':') for c in path.split('/')]
- replaced = []
- for c in components:
- if len(c) == 1:
- replaced.append(c[0])
- else:
- ns, tag = c
- replaced.append('{%s}%s' % (ns_map[ns], tag))
- return '/'.join(replaced)
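- # Sketch (namespace URL hypothetical):
- # >>> xpath_with_ns('media:song/media:author', {'media': 'http://example.com/ns'})
- # '{http://example.com/ns}song/{http://example.com/ns}author'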
- def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
- def _find_xpath(xpath):
- return node.find(compat_xpath(xpath))
- if isinstance(xpath, compat_basestring):
- n = _find_xpath(xpath)
- else:
- for xp in xpath:
- n = _find_xpath(xp)
- if n is not None:
- break
- if n is None:
- if default is not NO_DEFAULT:
- return default
- elif fatal:
- name = xpath if name is None else name
- raise ExtractorError('Could not find XML element %s' % name)
- else:
- return None
- return n
- def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
- n = xpath_element(node, xpath, name, fatal=fatal, default=default)
- if n is None or n == default:
- return n
- if n.text is None:
- if default is not NO_DEFAULT:
- return default
- elif fatal:
- name = xpath if name is None else name
- raise ExtractorError('Could not find XML element\'s text %s' % name)
- else:
- return None
- return n.text
- def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
- n = find_xpath_attr(node, xpath, key)
- if n is None:
- if default is not NO_DEFAULT:
- return default
- elif fatal:
- name = '%s[@%s]' % (xpath, key) if name is None else name
- raise ExtractorError('Could not find XML attribute %s' % name)
- else:
- return None
- return n.attrib[key]
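- # Sketch of the three helpers on a toy document:
- # >>> doc = compat_etree_fromstring('<root><div x="a">text</div></root>')
- # >>> xpath_element(doc, './/div').tag, xpath_text(doc, './/div'), xpath_attr(doc, './/div', 'x')
- # ('div', 'text', 'a')
- # On a miss, each returns None, or `default` when one is given, or raises
- # ExtractorError when fatal=True and no default is supplied.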
- def get_element_by_id(id, html):
- """Return the content of the tag with the specified ID in the passed HTML document"""
- return get_element_by_attribute('id', id, html)
- def get_element_by_class(class_name, html):
- """Return the content of the first tag with the specified class in the passed HTML document"""
- retval = get_elements_by_class(class_name, html)
- return retval[0] if retval else None
- def get_element_by_attribute(attribute, value, html, escape_value=True):
- retval = get_elements_by_attribute(attribute, value, html, escape_value)
- return retval[0] if retval else None
- def get_elements_by_class(class_name, html):
- """Return the content of all tags with the specified class in the passed HTML document as a list"""
- return get_elements_by_attribute(
- 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
- html, escape_value=False)
- def get_elements_by_attribute(attribute, value, html, escape_value=True):
- """Return the content of the tag with the specified attribute in the passed HTML document"""
- value = re.escape(value) if escape_value else value
- retlist = []
- for m in re.finditer(r'''(?xs)
- <([a-zA-Z0-9:._-]+)
- (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
- \s+%s=['"]?%s['"]?
- (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
- \s*>
- (?P<content>.*?)
- </\1>
- ''' % (re.escape(attribute), value), html):
- res = m.group('content')
- if res.startswith('"') or res.startswith("'"):
- res = res[1:-1]
- retlist.append(unescapeHTML(res))
- return retlist
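- # Sketch (toy HTML):
- # >>> get_element_by_class('foo', '<div class="foo bar">inner</div>')
- # 'inner'
- # >>> get_elements_by_attribute('data-id', '1', '<p data-id="1">a</p><p data-id="1">b</p>')
- # ['a', 'b']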
- class HTMLAttributeParser(compat_HTMLParser):
- """Trivial HTML parser to gather the attributes for a single element"""
- def __init__(self):
- self.attrs = {}
- compat_HTMLParser.__init__(self)
- def handle_starttag(self, tag, attrs):
- self.attrs = dict(attrs)
- def extract_attributes(html_element):
- """Given a string for an HTML element such as
- <el
- a="foo" B="bar" c="&98;az" d=boz
- empty= noval entity="&"
- sq='"' dq="'"
- >
- Decode and return a dictionary of attributes.
- {
- 'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
- 'empty': '', 'noval': None, 'entity': '&',
- 'sq': '"', 'dq': '\''
- }.
- NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
- but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
- """
- parser = HTMLAttributeParser()
- try:
- parser.feed(html_element)
- parser.close()
- # Older Python may throw HTMLParseError in case of malformed HTML
- except compat_HTMLParseError:
- pass
- return parser.attrs
- def clean_html(html):
- """Clean an HTML snippet into a readable string"""
- if html is None: # Convenience for sanitizing descriptions etc.
- return html
- # Newline vs <br />
- html = html.replace('\n', ' ')
- html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
- html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
- # Strip html tags
- html = re.sub('<.*?>', '', html)
- # Replace html entities
- html = unescapeHTML(html)
- return html.strip()
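- # Sketch:
- # >>> clean_html('one<br/>two <b>three</b> &amp; four')
- # 'one\ntwo three & four'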
- def sanitize_open(filename, open_mode):
- """Try to open the given filename, and slightly tweak it if this fails.
- Attempts to open the given filename. If this fails, it tries to change
- the filename slightly, step by step, until it's either able to open it
- or it fails and raises a final exception, like the standard open()
- function.
- It returns the tuple (stream, definitive_file_name).
- """
- try:
- if filename == '-':
- if sys.platform == 'win32':
- import msvcrt
- msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
- return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
- stream = open(encodeFilename(filename), open_mode)
- return (stream, filename)
- except (IOError, OSError) as err:
- if err.errno in (errno.EACCES,):
- raise
- # In case of error, try to remove win32 forbidden chars
- alt_filename = sanitize_path(filename)
- if alt_filename == filename:
- raise
- else:
- # An exception here should be caught in the caller
- stream = open(encodeFilename(alt_filename), open_mode)
- return (stream, alt_filename)
- def timeconvert(timestr):
- """Convert RFC 2822 defined time string into system timestamp"""
- timestamp = None
- timetuple = email.utils.parsedate_tz(timestr)
- if timetuple is not None:
- timestamp = email.utils.mktime_tz(timetuple)
- return timestamp
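- # Sketch, using the classic RFC 2822 example date:
- # >>> timeconvert('Sun, 06 Nov 1994 08:49:37 GMT')
- # 784111777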
- def sanitize_filename(s, restricted=False, is_id=False):
- """Sanitizes a string so it could be used as part of a filename.
- If restricted is set, use a stricter subset of allowed characters.
- Set is_id if this is not an arbitrary string, but an ID that should be kept
- if possible.
- """
- def replace_insane(char):
- if restricted and char in ACCENT_CHARS:
- return ACCENT_CHARS[char]
- if char == '?' or ord(char) < 32 or ord(char) == 127:
- return ''
- elif char == '"':
- return '' if restricted else '\''
- elif char == ':':
- return '_-' if restricted else ' -'
- elif char in '\\/|*<>':
- return '_'
- if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
- return '_'
- if restricted and ord(char) > 127:
- return '' if unicodedata.category(char)[0] in 'CM' else '_'
- return char
- # Replace look-alike Unicode glyphs
- if restricted and not is_id:
- s = unicodedata.normalize('NFKC', s)
- # Handle timestamps
- s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
- result = ''.join(map(replace_insane, s))
- if not is_id:
- while '__' in result:
- result = result.replace('__', '_')
- result = result.strip('_')
- # Common case of "Foreign band name - English song title"
- if restricted and result.startswith('-_'):
- result = result[2:]
- if result.startswith('-'):
- result = '_' + result[len('-'):]
- result = result.lstrip('.')
- if not result:
- result = '_'
- return result
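- # Sketch:
- # >>> sanitize_filename('A/B: C|D?')
- # 'A_B - C_D'
- # >>> sanitize_filename('Déjà vu: 12:34', restricted=True)
- # 'Deja_vu_-_12_34'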
- def sanitize_path(s):
- """Sanitizes and normalizes path on Windows"""
- if sys.platform != 'win32':
- return s
- drive_or_unc, _ = os.path.splitdrive(s)
- if sys.version_info < (2, 7) and not drive_or_unc:
- drive_or_unc, _ = os.path.splitunc(s)
- norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
- if drive_or_unc:
- norm_path.pop(0)
- sanitized_path = [
- path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
- for path_part in norm_path]
- if drive_or_unc:
- sanitized_path.insert(0, drive_or_unc + os.path.sep)
- return os.path.join(*sanitized_path)
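- # Sketch: on Windows this maps characters forbidden in path components (and
- # trailing dots/spaces) to '#', e.g. r'C:\foo|bar\baz.' -> r'C:\foo#bar\baz#';
- # on other platforms the path is returned unchanged.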
- def sanitize_url(url):
- # Prepend protocol-less URLs with an `http:` scheme to reduce
- # unwanted failures caused by a missing protocol
- if url.startswith('//'):
- return 'http:%s' % url
- # Fix some common typos seen so far
- COMMON_TYPOS = (
- # https://github.com/ytdl-org/youtube-dl/issues/15649
- (r'^httpss://', r'https://'),
- # https://bx1.be/lives/direct-tv/
- (r'^rmtp([es]?)://', r'rtmp\1://'),
- )
- for mistake, fixup in COMMON_TYPOS:
- if re.match(mistake, url):
- return re.sub(mistake, fixup, url)
- return url
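- # Sketch:
- # >>> sanitize_url('//example.com/x')
- # 'http://example.com/x'
- # >>> sanitize_url('rmtp://example.com/live')
- # 'rtmp://example.com/live'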
- def extract_basic_auth(url):
- parts = compat_urllib_parse.urlsplit(url)
- if parts.username is None:
- return url, None
- url = compat_urllib_parse.urlunsplit(parts._replace(netloc=(
- parts.hostname if parts.port is None
- else '%s:%d' % (parts.hostname, parts.port))))
- auth_payload = base64.b64encode(
- ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
- return url, 'Basic {0}'.format(auth_payload.decode('ascii'))
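- # Sketch (credentials hypothetical): the userinfo is stripped from the URL
- # and returned as a ready-made Basic auth header value.
- # >>> extract_basic_auth('http://user:pass@example.com/x')
- # ('http://example.com/x', 'Basic dXNlcjpwYXNz')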
- def sanitized_Request(url, *args, **kwargs):
- url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
- if auth_header is not None:
- headers = args[1] if len(args) > 1 else kwargs.get('headers')
- headers = headers or {}
- headers['Authorization'] = auth_header
- if len(args) <= 1 and kwargs.get('headers') is None:
- kwargs['headers'] = headers
- kwargs = compat_kwargs(kwargs)
- return compat_urllib_request.Request(url, *args, **kwargs)
- def expand_path(s):
- """Expand shell variables and ~"""
- return os.path.expandvars(compat_expanduser(s))
- def orderedSet(iterable):
- """ Remove all duplicates from the input iterable """
- res = []
- for el in iterable:
- if el not in res:
- res.append(el)
- return res
- def _htmlentity_transform(entity_with_semicolon):
- """Transforms an HTML entity to a character."""
- entity = entity_with_semicolon[:-1]
- # Known non-numeric HTML entity
- if entity in compat_html_entities.name2codepoint:
- return compat_chr(compat_html_entities.name2codepoint[entity])
- # TODO: HTML5 allows entities without a semicolon. For example,
- # '&Eacuteric' should be decoded as 'Éric'.
- if entity_with_semicolon in compat_html_entities_html5:
- return compat_html_entities_html5[entity_with_semicolon]
- mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
- if mobj is not None:
- numstr = mobj.group(1)
- if numstr.startswith('x'):
- base = 16
- numstr = '0%s' % numstr
- else:
- base = 10
- # See https://github.com/ytdl-org/youtube-dl/issues/7518
- try:
- return compat_chr(int(numstr, base))
- except ValueError:
- pass
- # Unknown entity in name, return its literal representation
- return '&%s;' % entity
- def unescapeHTML(s):
- if s is None:
- return None
- assert isinstance(s, compat_str)
- return re.sub(
- r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
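- # Sketch:
- # >>> unescapeHTML('&eacute;clair &#233; &#xe9; &nosuchentity;')
- # 'éclair é é &nosuchentity;'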
- def process_communicate_or_kill(p, *args, **kwargs):
- try:
- return p.communicate(*args, **kwargs)
- except BaseException: # Including KeyboardInterrupt
- p.kill()
- p.wait()
- raise
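- # Usage sketch (POSIX-only command, for illustration): the child is killed
- # if e.g. KeyboardInterrupt arrives while we wait for it.
- # >>> p = subprocess.Popen(['echo', 'hi'], stdout=subprocess.PIPE)
- # >>> out, _ = process_communicate_or_kill(p)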
- def get_subprocess_encoding():
- if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
- # For subprocess calls, encode with locale encoding
- # Refer to http://stackoverflow.com/a/9951851/35070
- encoding = preferredencoding()
- else:
- encoding = sys.getfilesystemencoding()
- if encoding is None:
- encoding = 'utf-8'
- return encoding
- # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
- if sys.version_info < (3, 0) and not sys.platform.startswith('java'):
- def encodeFilename(s, for_subprocess=False):
- """
- @param s The name of the file
- """
- # Pass '' directly to use Unicode APIs on Windows 2000 and up
- # (Detecting Windows NT 4 is tricky because 'major >= 4' would
- # match Windows 9x series as well. Besides, NT 4 is obsolete.)
- if (not for_subprocess
- and sys.platform == 'win32'
- and sys.getwindowsversion()[0] >= 5
- and isinstance(s, compat_str)):
- return s
- return _encode_compat_str(s, get_subprocess_encoding(), 'ignore')
- def decodeFilename(b, for_subprocess=False):
- return _decode_compat_str(b, get_subprocess_encoding(), 'ignore')
- else:
- # Python 3 has a Unicode API
- encodeFilename = decodeFilename = lambda *s, **k: s[0]
- def encodeArgument(s):
- if not isinstance(s, compat_str):
- # Legacy code that uses byte strings
- # Uncomment the following line after fixing all post processors
- # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
- s = s.decode('ascii')
- return encodeFilename(s, True)
- def decodeArgument(b):
- return decodeFilename(b, True)
- def decodeOption(optval):
- if optval is None:
- return optval
- return _decode_compat_str(optval)
- def formatSeconds(secs):
- if secs > 3600:
- return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
- elif secs > 60:
- return '%d:%02d' % (secs // 60, secs % 60)
- else:
- return '%d' % secs
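- # Sketch:
- # >>> formatSeconds(3661), formatSeconds(61), formatSeconds(45)
- # ('1:01:01', '1:01', '45')
- # Note the strict comparisons: exactly 3600 seconds formats as '60:00'.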
- def make_HTTPS_handler(params, **kwargs):
- # https://www.rfc-editor.org/info/rfc7301
- ALPN_PROTOCOLS = ['http/1.1']
- def set_alpn_protocols(ctx):
- # From https://github.com/yt-dlp/yt-dlp/commit/2c6dcb65fb612fc5bc5c61937bf438d3c473d8d0
- # Thanks @coletdjnz
- # Some servers may (wrongly) reject requests if ALPN extension is not sent. See:
- # https://github.com/python/cpython/issues/85140
- # https://github.com/yt-dlp/yt-dlp/issues/3878
- try:
- ctx.set_alpn_protocols(ALPN_PROTOCOLS)
- except (AttributeError, NotImplementedError):
- # Python < 2.7.10, not ssl.HAS_ALPN
- pass
- opts_no_check_certificate = params.get('nocheckcertificate', False)
- if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
- context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
- set_alpn_protocols(context)
- if opts_no_check_certificate:
- context.check_hostname = False
- context.verify_mode = ssl.CERT_NONE
- try:
- return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
- except TypeError:
- # Python 2.7.8
- # (create_default_context present but HTTPSHandler has no context=)
- pass
- if sys.version_info < (3, 2):
- return YoutubeDLHTTPSHandler(params, **kwargs)
- else: # Python3 < 3.4
- context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
- context.verify_mode = (ssl.CERT_NONE
- if opts_no_check_certificate
- else ssl.CERT_REQUIRED)
- context.set_default_verify_paths()
- set_alpn_protocols(context)
- return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
- def bug_reports_message():
- if ytdl_is_updateable():
- update_cmd = 'type youtube-dl -U to update'
- else:
- update_cmd = 'see https://yt-dl.org/update on how to update'
- msg = '; please report this issue on https://yt-dl.org/bug .'
- msg += ' Make sure you are using the latest version; %s.' % update_cmd
- msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
- return msg
- class YoutubeDLError(Exception):
- """Base exception for YoutubeDL errors."""
- pass
- class ExtractorError(YoutubeDLError):
- """Error during info extraction."""
- def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
- """ tb, if given, is the original traceback (so that it can be printed out).
- If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
- """
- if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
- expected = True
- if video_id is not None:
- msg = video_id + ': ' + msg
- if cause:
- msg += ' (caused by %r)' % cause
- if not expected:
- msg += bug_reports_message()
- super(ExtractorError, self).__init__(msg)
- self.traceback = tb
- self.exc_info = sys.exc_info() # preserve original exception
- self.cause = cause
- self.video_id = video_id
- def format_traceback(self):
- if self.traceback is None:
- return None
- return ''.join(traceback.format_tb(self.traceback))
- class UnsupportedError(ExtractorError):
- def __init__(self, url):
- super(UnsupportedError, self).__init__(
- 'Unsupported URL: %s' % url, expected=True)
- self.url = url
- class RegexNotFoundError(ExtractorError):
- """Error when a regex didn't match"""
- pass
- class GeoRestrictedError(ExtractorError):
- """Geographic restriction Error exception.
- This exception may be thrown when a video is not available from your
- geographic location due to geographic restrictions imposed by a website.
- """
- def __init__(self, msg, countries=None):
- super(GeoRestrictedError, self).__init__(msg, expected=True)
- self.msg = msg
- self.countries = countries
- class DownloadError(YoutubeDLError):
- """Download Error exception.
- This exception may be thrown by FileDownloader objects if they are not
- configured to continue on errors. They will contain the appropriate
- error message.
- """
- def __init__(self, msg, exc_info=None):
- """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
- super(DownloadError, self).__init__(msg)
- self.exc_info = exc_info
- class SameFileError(YoutubeDLError):
- """Same File exception.
- This exception will be thrown by FileDownloader objects if they detect
- multiple files would have to be downloaded to the same file on disk.
- """
- pass
- class PostProcessingError(YoutubeDLError):
- """Post Processing exception.
- This exception may be raised by PostProcessor's .run() method to
- indicate an error in the postprocessing task.
- """
- def __init__(self, msg):
- super(PostProcessingError, self).__init__(msg)
- self.msg = msg
- class MaxDownloadsReached(YoutubeDLError):
- """ --max-downloads limit has been reached. """
- pass
- class UnavailableVideoError(YoutubeDLError):
- """Unavailable Format exception.
- This exception will be thrown when a video is requested
- in a format that is not available for that video.
- """
- pass
- class ContentTooShortError(YoutubeDLError):
- """Content Too Short exception.
- This exception may be raised by FileDownloader objects when a file they
- download is too small for what the server announced first, indicating
- the connection was probably interrupted.
- """
- def __init__(self, downloaded, expected):
- super(ContentTooShortError, self).__init__(
- 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
- )
- # Both in bytes
- self.downloaded = downloaded
- self.expected = expected
- class XAttrMetadataError(YoutubeDLError):
- def __init__(self, code=None, msg='Unknown error'):
- super(XAttrMetadataError, self).__init__(msg)
- self.code = code
- self.msg = msg
- # Parsing code and msg
- if (self.code in (errno.ENOSPC, errno.EDQUOT)
- or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
- self.reason = 'NO_SPACE'
- elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
- self.reason = 'VALUE_TOO_LONG'
- else:
- self.reason = 'NOT_SUPPORTED'
- class XAttrUnavailableError(YoutubeDLError):
- pass
- def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
- # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
- # expected HTTP responses to meet HTTP/1.0 or later (see also
- # https://github.com/ytdl-org/youtube-dl/issues/6727)
- if sys.version_info < (3, 0):
- kwargs['strict'] = True
- hc = http_class(*args, **compat_kwargs(kwargs))
- source_address = ydl_handler._params.get('source_address')
- if source_address is not None:
- # This is to workaround _create_connection() from socket where it will try all
- # address data from getaddrinfo() including IPv6. This filters the result from
- # getaddrinfo() based on the source_address value.
- # This is based on the cpython socket.create_connection() function.
- # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
- def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
- host, port = address
- err = None
- addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
- af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
- ip_addrs = [addr for addr in addrs if addr[0] == af]
- if addrs and not ip_addrs:
- ip_version = 'v4' if af == socket.AF_INET else 'v6'
- raise socket.error(
- "No remote IP%s addresses available for connect, can't use '%s' as source address"
- % (ip_version, source_address[0]))
- for res in ip_addrs:
- af, socktype, proto, canonname, sa = res
- sock = None
- try:
- sock = socket.socket(af, socktype, proto)
- if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
- sock.settimeout(timeout)
- sock.bind(source_address)
- sock.connect(sa)
- err = None # Explicitly break reference cycle
- return sock
- except socket.error as _:
- err = _
- if sock is not None:
- sock.close()
- if err is not None:
- raise err
- else:
- raise socket.error('getaddrinfo returns an empty list')
- if hasattr(hc, '_create_connection'):
- hc._create_connection = _create_connection
- sa = (source_address, 0)
- if hasattr(hc, 'source_address'): # Python 2.7+
- hc.source_address = sa
- else: # Python 2.6
- def _hc_connect(self, *args, **kwargs):
- sock = _create_connection(
- (self.host, self.port), self.timeout, sa)
- if is_https:
- self.sock = ssl.wrap_socket(
- sock, self.key_file, self.cert_file,
- ssl_version=ssl.PROTOCOL_TLSv1)
- else:
- self.sock = sock
- hc.connect = functools.partial(_hc_connect, hc)
- return hc
- def handle_youtubedl_headers(headers):
- filtered_headers = headers
- if 'Youtubedl-no-compression' in filtered_headers:
- filtered_headers = filter_dict(filtered_headers, cndn=lambda k, _: k.lower() != 'accept-encoding')
- del filtered_headers['Youtubedl-no-compression']
- return filtered_headers
- class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
- """Handler for HTTP requests and responses.
- This class, when installed with an OpenerDirector, automatically adds
- the standard headers to every HTTP request and handles gzipped and
- deflated responses from web servers. If compression is to be avoided in
- a particular request, the original request in the program code only has
- to include the HTTP header "Youtubedl-no-compression", which will be
- removed before making the real request.
- Part of this code was copied from:
- http://techknack.net/python-urllib2-handlers/, archived at
- https://web.archive.org/web/20130527205558/http://techknack.net/python-urllib2-handlers/
- Andrew Rowls, the author of that code, agreed to release it to the
- public domain.
- """
- def __init__(self, params, *args, **kwargs):
- compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
- self._params = params
- def http_open(self, req):
- conn_class = compat_http_client.HTTPConnection
- socks_proxy = req.headers.get('Ytdl-socks-proxy')
- if socks_proxy:
- conn_class = make_socks_conn_class(conn_class, socks_proxy)
- del req.headers['Ytdl-socks-proxy']
- return self.do_open(functools.partial(
- _create_http_connection, self, conn_class, False),
- req)
- @staticmethod
- def deflate_gz(data):
- try:
- # format:zlib,gzip + windowsize:32768
- return data and zlib.decompress(data, 32 + zlib.MAX_WBITS)
- except zlib.error:
- # raw deflate stream, windowsize:32768 (RFC 9110 calls this "non-conformant")
- return zlib.decompress(data, -zlib.MAX_WBITS)
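- # Editor's sketch (not in the original source): both framings are accepted -
- #   deflate_gz(zlib.compress(b'data'))  ->  b'data'   (zlib/gzip, auto-detected)
- #   co = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
- #   deflate_gz(co.compress(b'data') + co.flush())  ->  b'data'   (raw deflate fallback)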
- @staticmethod
- def gzip(data):
- from gzip import GzipFile
- def _gzip(data):
- with io.BytesIO(data) as data_buf:
- gz = GzipFile(fileobj=data_buf, mode='rb')
- return gz.read()
- try:
- return _gzip(data)
- except IOError as original_ioerror:
- # There may be junk at the end of the file
- # See http://stackoverflow.com/q/4928560/35070 for details
- for i in range(1, 1024):
- try:
- return _gzip(data[:-i])
- except IOError:
- continue
- else:
- raise original_ioerror
- @staticmethod
- def brotli(data):
- return data and brotli.decompress(data)
- @staticmethod
- def compress(data):
- return data and ncompress.decompress(data)
- @staticmethod
- def _fix_path(url):
- # an embedded /../ or /./ sequence is not automatically handled by urllib2
- # see https://github.com/yt-dlp/yt-dlp/issues/3355
- parsed_url = compat_urllib_parse.urlsplit(url)
- path = parsed_url.path
- if not path.endswith('/'):
- path += '/'
- parts = path.partition('/./')
- if not parts[1]:
- parts = path.partition('/../')
- if parts[1]:
- path = compat_urllib_parse.urljoin(
- parts[0] + parts[1][:1],
- parts[1][1:] + (parts[2] if parsed_url.path.endswith('/') else parts[2][:-1]))
- url = parsed_url._replace(path=path).geturl()
- if '/.' in url:
- # worse, the URL path may begin with /../, contrary to the RFCs: work
- # around this by stripping such prefixes, as e.g. Firefox does
- path = parsed_url.path + '/'
- while path.startswith('/.'):
- if path.startswith('/../'):
- path = path[3:]
- elif path.startswith('/./'):
- path = path[2:]
- else:
- break
- path = path[:-1]
- if not path.startswith('/') and parsed_url.path.startswith('/'):
- path = '/' + path
- url = parsed_url._replace(path=path).geturl()
- return url
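- # Editor's sketch (illustrative URLs, not from the original source):
- #   _fix_path('https://host/a/b/../c.mp4')  ->  'https://host/a/c.mp4'
- #   _fix_path('https://host/a/./b.mp4')     ->  'https://host/a/b.mp4'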
- def http_request(self, req):
- url = req.get_full_url()
- # resolve embedded . and ..
- url_fixed = self._fix_path(url)
- # According to RFC 3986, URLs can not contain non-ASCII characters; however this is not
- # always respected by websites: some tend to give out URLs with non percent-encoded
- # non-ASCII characters (see telemb.py, ard.py [#3412])
- # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
- # To work around aforementioned issue we will replace request's original URL with
- # percent-encoded one
- # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
- # the code of this workaround has been moved here from YoutubeDL.urlopen()
- url_escaped = escape_url(url_fixed)
- # Substitute URL if any change after escaping
- if url != url_escaped:
- req = update_Request(req, url=url_escaped)
- for h, v in std_headers.items():
- # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
- # The dict keys are capitalized because of this bug by urllib
- if h.capitalize() not in req.headers:
- req.add_header(h, v)
- # Similarly, 'Accept-encoding'
- if 'Accept-encoding' not in req.headers:
- req.add_header(
- 'Accept-Encoding', join_nonempty(
- 'gzip', 'deflate', brotli and 'br', ncompress and 'compress',
- delim=', '))
- req.headers = handle_youtubedl_headers(req.headers)
- if sys.version_info < (2, 7):
- # avoid possible race where __r_type may be unset
- req.get_type()
- if '#' in req.get_full_url():
- # Python 2.6 is brain-dead when it comes to fragments
- req._Request__original = req._Request__original.partition('#')[0]
- req._Request__r_type = req._Request__r_type.partition('#')[0]
- # Use the totally undocumented AbstractHTTPHandler per
- # https://github.com/yt-dlp/yt-dlp/pull/4158
- return compat_urllib_request.AbstractHTTPHandler.do_request_(self, req)
- def http_response(self, req, resp):
- old_resp = resp
- # Content-Encoding header lists the encodings in order that they were applied [1].
- # To decompress, we simply do the reverse.
- # [1]: https://datatracker.ietf.org/doc/html/rfc9110#name-content-encoding
- decoded_response = None
- decoders = {
- 'gzip': self.deflate_gz,
- 'deflate': self.deflate_gz,
- }
- if brotli:
- decoders['br'] = self.brotli
- if ncompress:
- decoders['compress'] = self.compress
- if sys.platform.startswith('java'):
- # Jython zlib implementation misses gzip
- decoders['gzip'] = self.gzip
- def encodings(hdrs):
- # A header field that allows multiple values can have multiple instances [2].
- # [2]: https://datatracker.ietf.org/doc/html/rfc9110#name-fields
- for e in reversed(','.join(hdrs).split(',')):
- if e:
- yield e.strip()
- encodings_left = []
- try:
- resp.headers.get_all
- hdrs = resp.headers
- except AttributeError:
- # Py2 has no get_all() method: headers are rfc822.Message
- from email.message import Message
- hdrs = Message()
- for k, v in resp.headers.items():
- hdrs[k] = v
- decoder, decoded_response = True, None
- for encoding in encodings(hdrs.get_all('Content-Encoding', [])):
- # "SHOULD consider" x-compress, x-gzip as compress, gzip
- decoder = decoder and decoders.get(remove_start(encoding, 'x-'))
- if not decoder:
- encodings_left.insert(0, encoding)
- continue
- decoded_response = decoder(decoded_response or resp.read())
- if decoded_response is not None:
- resp = compat_urllib_request.addinfourl(
- io.BytesIO(decoded_response), old_resp.headers, old_resp.url, old_resp.code)
- resp.msg = old_resp.msg
- del resp.headers['Content-Length']
- resp.headers['Content-Length'] = '%d' % len(decoded_response)
- del resp.headers['Content-Encoding']
- if encodings_left:
- resp.headers['Content-Encoding'] = ', '.join(encodings_left)
- # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
- # https://github.com/ytdl-org/youtube-dl/issues/6457).
- if 300 <= resp.code < 400:
- location = resp.headers.get('Location')
- if location:
- # Per RFC 2616 the default charset is iso-8859-1, which Python 3 respects
- if sys.version_info >= (3, 0):
- location = location.encode('iso-8859-1')
- location = location.decode('utf-8')
- # resolve embedded . and ..
- location_fixed = self._fix_path(location)
- location_escaped = escape_url(location_fixed)
- if location != location_escaped:
- del resp.headers['Location']
- if not isinstance(location_escaped, str): # Py 2 case
- location_escaped = location_escaped.encode('utf-8')
- resp.headers['Location'] = location_escaped
- return resp
- https_request = http_request
- https_response = http_response
- def make_socks_conn_class(base_class, socks_proxy):
- assert issubclass(base_class, (
- compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
- url_components = compat_urllib_parse.urlparse(socks_proxy)
- if url_components.scheme.lower() == 'socks5':
- socks_type = ProxyType.SOCKS5
- elif url_components.scheme.lower() in ('socks', 'socks4'):
- socks_type = ProxyType.SOCKS4
- elif url_components.scheme.lower() == 'socks4a':
- socks_type = ProxyType.SOCKS4A
- def unquote_if_non_empty(s):
- if not s:
- return s
- return compat_urllib_parse_unquote_plus(s)
- proxy_args = (
- socks_type,
- url_components.hostname, url_components.port or 1080,
- True, # Remote DNS
- unquote_if_non_empty(url_components.username),
- unquote_if_non_empty(url_components.password),
- )
- class SocksConnection(base_class):
- def connect(self):
- self.sock = sockssocket()
- self.sock.setproxy(*proxy_args)
- if type(self.timeout) in (int, float):
- self.sock.settimeout(self.timeout)
- self.sock.connect((self.host, self.port))
- if isinstance(self, compat_http_client.HTTPSConnection):
- if hasattr(self, '_context'): # Python > 2.6
- self.sock = self._context.wrap_socket(
- self.sock, server_hostname=self.host)
- else:
- self.sock = ssl.wrap_socket(self.sock)
- return SocksConnection
- class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
- def __init__(self, params, https_conn_class=None, *args, **kwargs):
- compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
- self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
- self._params = params
- def https_open(self, req):
- kwargs = {}
- conn_class = self._https_conn_class
- if hasattr(self, '_context'): # python > 2.6
- kwargs['context'] = self._context
- if hasattr(self, '_check_hostname'): # python 3.x
- kwargs['check_hostname'] = self._check_hostname
- socks_proxy = req.headers.get('Ytdl-socks-proxy')
- if socks_proxy:
- conn_class = make_socks_conn_class(conn_class, socks_proxy)
- del req.headers['Ytdl-socks-proxy']
- return self.do_open(functools.partial(
- _create_http_connection, self, conn_class, True),
- req, **kwargs)
- class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
- """
- See [1] for cookie file format.
- 1. https://curl.haxx.se/docs/http-cookies.html
- """
- _HTTPONLY_PREFIX = '#HttpOnly_'
- _ENTRY_LEN = 7
- _HEADER = '''# Netscape HTTP Cookie File
- # This file is generated by youtube-dl. Do not edit.
- '''
- _CookieFileEntry = collections.namedtuple(
- 'CookieFileEntry',
- ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
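- # Each data line of a cookie file carries the 7 tab-separated fields above,
- # e.g. (editor's illustration, not a real cookie):
- #   '.example.com\tTRUE\t/\tFALSE\t1717171717\tsession_id\tabcdef'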
- def save(self, filename=None, ignore_discard=False, ignore_expires=False):
- """
- Save cookies to a file.
- Most of the code is taken from CPython 3.8 and slightly adapted
- to support cookie files with UTF-8 in both python 2 and 3.
- """
- if filename is None:
- if self.filename is not None:
- filename = self.filename
- else:
- raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
- # Store session cookies with `expires` set to 0 instead of an empty
- # string
- for cookie in self:
- if cookie.expires is None:
- cookie.expires = 0
- with io.open(filename, 'w', encoding='utf-8') as f:
- f.write(self._HEADER)
- now = time.time()
- for cookie in self:
- if not ignore_discard and cookie.discard:
- continue
- if not ignore_expires and cookie.is_expired(now):
- continue
- if cookie.secure:
- secure = 'TRUE'
- else:
- secure = 'FALSE'
- if cookie.domain.startswith('.'):
- initial_dot = 'TRUE'
- else:
- initial_dot = 'FALSE'
- if cookie.expires is not None:
- expires = compat_str(cookie.expires)
- else:
- expires = ''
- if cookie.value is None:
- # cookies.txt regards 'Set-Cookie: foo' as a cookie
- # with no name, whereas http.cookiejar regards it as a
- # cookie with no value.
- name = ''
- value = cookie.name
- else:
- name = cookie.name
- value = cookie.value
- f.write(
- '\t'.join([cookie.domain, initial_dot, cookie.path,
- secure, expires, name, value]) + '\n')
- def load(self, filename=None, ignore_discard=False, ignore_expires=False):
- """Load cookies from a file."""
- if filename is None:
- if self.filename is not None:
- filename = self.filename
- else:
- raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
- def prepare_line(line):
- if line.startswith(self._HTTPONLY_PREFIX):
- line = line[len(self._HTTPONLY_PREFIX):]
- # comments and empty lines are fine
- if line.startswith('#') or not line.strip():
- return line
- cookie_list = line.split('\t')
- if len(cookie_list) != self._ENTRY_LEN:
- raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
- cookie = self._CookieFileEntry(*cookie_list)
- if cookie.expires_at and not cookie.expires_at.isdigit():
- raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
- return line
- cf = io.StringIO()
- with io.open(filename, encoding='utf-8') as f:
- for line in f:
- try:
- cf.write(prepare_line(line))
- except compat_cookiejar.LoadError as e:
- write_string(
- 'WARNING: skipping cookie file entry due to %s: %r\n'
- % (e, line), sys.stderr)
- continue
- cf.seek(0)
- self._really_load(cf, filename, ignore_discard, ignore_expires)
- # Session cookies are denoted by the `expires` field set to either
- # an empty string or 0. MozillaCookieJar only recognizes the former
- # (see [1]), so we need to force the latter to be recognized as
- # session cookies on our own.
- # Session cookies may be important for cookie-based authentication:
- # usually, when a user does not tick the 'Remember me' box while
- # logging in on a site, some important cookies are stored as session
- # cookies, and failing to recognize them results in a failed login.
- # 1. https://bugs.python.org/issue17164
- for cookie in self:
- # Treat `expires=0` cookies as session cookies
- if cookie.expires == 0:
- cookie.expires = None
- cookie.discard = True
- def get_cookie_header(self, url):
- """Generate a Cookie HTTP header for a given url"""
- cookie_req = sanitized_Request(url)
- self.add_cookie_header(cookie_req)
- return cookie_req.get_header('Cookie')
- def get_cookies_for_url(self, url):
- """Generate a list of Cookie objects for a given url"""
- # Policy `_now` attribute must be set before calling `_cookies_for_request`
- # Ref: https://github.com/python/cpython/blob/3.7/Lib/http/cookiejar.py#L1360
- self._policy._now = self._now = int(time.time())
- return self._cookies_for_request(sanitized_Request(url))
- class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
- def __init__(self, cookiejar=None):
- compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
- def http_response(self, request, response):
- # Python 2 will choke on next HTTP request in row if there are non-ASCII
- # characters in Set-Cookie HTTP header of last response (see
- # https://github.com/ytdl-org/youtube-dl/issues/6769).
- # In order to at least prevent crashing we will percent encode Set-Cookie
- # header before HTTPCookieProcessor starts processing it.
- # if sys.version_info < (3, 0) and response.headers:
- # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
- # set_cookie = response.headers.get(set_cookie_header)
- # if set_cookie:
- # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
- # if set_cookie != set_cookie_escaped:
- # del response.headers[set_cookie_header]
- # response.headers[set_cookie_header] = set_cookie_escaped
- return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
- https_request = compat_urllib_request.HTTPCookieProcessor.http_request
- https_response = http_response
- class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
- """YoutubeDL redirect handler
- The code is based on HTTPRedirectHandler implementation from CPython [1].
- This redirect handler fixes and improves the logic to better align with RFC 7231
- and with what browsers tend to do [2][3]
- 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
- 2. https://datatracker.ietf.org/doc/html/rfc7231
- 3. https://github.com/python/cpython/issues/91306
- """
- # Supply possibly missing alias
- http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302
- def redirect_request(self, req, fp, code, msg, headers, newurl):
- """Return a Request or None in response to a redirect.
- This is called by the http_error_30x methods when a
- redirection response is received. If a redirection should
- take place, return a new Request to allow http_error_30x to
- perform the redirect. Otherwise, raise HTTPError if no-one
- else should try to handle this url. Return None if you can't
- but another Handler might.
- """
- if code not in (301, 302, 303, 307, 308):
- raise compat_urllib_HTTPError(req.full_url, code, msg, headers, fp)
- new_method = req.get_method()
- new_data = req.data
- # On python 2 urlh.geturl() may sometimes return redirect URL
- # as a byte string instead of unicode. This workaround forces
- # it to return unicode.
- newurl = _decode_compat_str(newurl)
- # Be conciliant with URIs containing a space. This is mainly
- # redundant with the more complete encoding done in http_error_302(),
- # but it is kept for compatibility with other callers.
- newurl = newurl.replace(' ', '%20')
- # Technically the Cookie header should be in unredirected_hdrs;
- # however in practice some may set it in normal headers anyway.
- # We will remove it here to prevent any leaks.
- remove_headers = ['Cookie']
- # A 303 must either use GET or HEAD for subsequent request
- # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
- if code == 303 and req.get_method() != 'HEAD':
- new_method = 'GET'
- # 301 and 302 redirects are commonly turned into a GET from a POST
- # for subsequent requests by browsers, so we'll do the same.
- # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
- # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
- elif code in (301, 302) and req.get_method() == 'POST':
- new_method = 'GET'
- # only remove payload if method changed (e.g. POST to GET)
- if new_method != req.get_method():
- new_data = None
- remove_headers.extend(['Content-Length', 'Content-Type'])
- new_headers = filter_dict(req.headers, cndn=lambda k, _: k.title() not in remove_headers)
- return compat_urllib_request.Request(
- newurl, headers=new_headers, origin_req_host=req.origin_req_host,
- unverifiable=True, method=new_method, data=new_data)
- def extract_timezone(date_str):
- m = re.search(
- r'''(?x)
- ^.{8,}? # >=8 char non-TZ prefix, if present
- (?P<tz>Z| # just the UTC Z, or
- (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
- (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
- [ ]? # optional space
- (?P<sign>\+|-) # +/-
- (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
- $)
- ''', date_str)
- if not m:
- m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
- timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
- if timezone is not None:
- date_str = date_str[:-len(m.group('tz'))]
- timezone = datetime.timedelta(hours=timezone or 0)
- else:
- date_str = date_str[:-len(m.group('tz'))]
- if not m.group('sign'):
- timezone = datetime.timedelta()
- else:
- sign = 1 if m.group('sign') == '+' else -1
- timezone = datetime.timedelta(
- hours=sign * int(m.group('hours')),
- minutes=sign * int(m.group('minutes')))
- return timezone, date_str
- def parse_iso8601(date_str, delimiter='T', timezone=None):
- """ Return a UNIX timestamp from the given date """
- if date_str is None:
- return None
- date_str = re.sub(r'\.[0-9]+', '', date_str)
- if timezone is None:
- timezone, date_str = extract_timezone(date_str)
- try:
- date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
- dt = datetime.datetime.strptime(date_str, date_format) - timezone
- return calendar.timegm(dt.timetuple())
- except ValueError:
- pass
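- # Editor's sketch of expected results (values computed by hand):
- #   parse_iso8601('2014-03-23T23:04:26+0100')  ->  1395612266
- #   parse_iso8601('2014-03-23T22:04:26Z')      ->  1395612266  (same instant)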
- def date_formats(day_first=True):
- return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
- def unified_strdate(date_str, day_first=True):
- """Return a string with the date in the format YYYYMMDD"""
- if date_str is None:
- return None
- upload_date = None
- # Replace commas
- date_str = date_str.replace(',', ' ')
- # Remove AM/PM + timezone
- date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
- _, date_str = extract_timezone(date_str)
- for expression in date_formats(day_first):
- try:
- upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
- except ValueError:
- pass
- if upload_date is None:
- timetuple = email.utils.parsedate_tz(date_str)
- if timetuple:
- try:
- upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
- except ValueError:
- pass
- if upload_date is not None:
- return compat_str(upload_date)
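- # Editor's sketch (illustrative inputs):
- #   unified_strdate('December 21, 2010')  ->  '20101221'
- #   unified_strdate('8/7/2009')           ->  '20090708'  (day-first by default)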
- def unified_timestamp(date_str, day_first=True):
- if date_str is None:
- return None
- date_str = re.sub(r'\s+', ' ', re.sub(
- r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str))
- pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
- timezone, date_str = extract_timezone(date_str)
- # Remove AM/PM + timezone
- date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
- # Remove unrecognized timezones from ISO 8601 alike timestamps
- m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
- if m:
- date_str = date_str[:-len(m.group('tz'))]
- # Python only supports microseconds, so remove nanoseconds
- m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
- if m:
- date_str = m.group(1)
- for expression in date_formats(day_first):
- try:
- dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
- return calendar.timegm(dt.timetuple())
- except ValueError:
- pass
- timetuple = email.utils.parsedate_tz(date_str)
- if timetuple:
- return calendar.timegm(timetuple) + pm_delta * 3600 - compat_datetime_timedelta_total_seconds(timezone)
- def determine_ext(url, default_ext='unknown_video'):
- if url is None or '.' not in url:
- return default_ext
- guess = url.partition('?')[0].rpartition('.')[2]
- if re.match(r'^[A-Za-z0-9]+$', guess):
- return guess
- # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
- elif guess.rstrip('/') in KNOWN_EXTENSIONS:
- return guess.rstrip('/')
- else:
- return default_ext
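- # Editor's sketch (illustrative URLs):
- #   determine_ext('http://example.com/v.mp4')            ->  'mp4'
- #   determine_ext('http://example.com/v.mp4/?download')  ->  'mp4'  (via KNOWN_EXTENSIONS)
- #   determine_ext('http://example.com/download')         ->  'unknown_video'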
- def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
- return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
- def date_from_str(date_str):
- """
- Return a datetime object from a string in the format YYYYMMDD or
- (now|today)[+-][0-9](day|week|month|year)(s)?"""
- today = datetime.date.today()
- if date_str in ('now', 'today'):
- return today
- if date_str == 'yesterday':
- return today - datetime.timedelta(days=1)
- match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
- if match is not None:
- sign = match.group('sign')
- time = int(match.group('time'))
- if sign == '-':
- time = -time
- unit = match.group('unit')
- # A rough approximation: treat a month as 30 days and a year as 365 days
- if unit == 'month':
- unit = 'day'
- time *= 30
- elif unit == 'year':
- unit = 'day'
- time *= 365
- unit += 's'
- delta = datetime.timedelta(**{unit: time})
- return today + delta
- return datetime.datetime.strptime(date_str, '%Y%m%d').date()
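- # Editor's sketch:
- #   date_from_str('20140501')   ->  datetime.date(2014, 5, 1)
- #   date_from_str('now-1week')  ->  the date 7 days before today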
- def hyphenate_date(date_str):
- """
- Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
- match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
- if match is not None:
- return '-'.join(match.groups())
- else:
- return date_str
- class DateRange(object):
- """Represents a time interval between two dates"""
- def __init__(self, start=None, end=None):
- """start and end must be strings in the format accepted by date"""
- if start is not None:
- self.start = date_from_str(start)
- else:
- self.start = datetime.datetime.min.date()
- if end is not None:
- self.end = date_from_str(end)
- else:
- self.end = datetime.datetime.max.date()
- if self.start > self.end:
- raise ValueError('Date range: "%s": the start date must be before the end date' % self)
- @classmethod
- def day(cls, day):
- """Returns a range that only contains the given day"""
- return cls(day, day)
- def __contains__(self, date):
- """Check if the date is in the range"""
- if not isinstance(date, datetime.date):
- date = date_from_str(date)
- return self.start <= date <= self.end
- def __str__(self):
- return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
- def __eq__(self, other):
- return (isinstance(other, DateRange)
- and self.start == other.start and self.end == other.end)
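- # Editor's sketch:
- #   '20200615' in DateRange('20200101', '20201231')  ->  True
- #   DateRange.day('20200101') == DateRange('20200101', '20200101')  ->  True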
- def platform_name():
- """ Returns the platform name as a compat_str """
- res = platform.platform()
- return _decode_compat_str(res)
- def _windows_write_string(s, out):
- """ Returns True if the string was written using special methods,
- False if it has yet to be written out."""
- # Adapted from http://stackoverflow.com/a/3259271/35070
- import ctypes
- import ctypes.wintypes
- WIN_OUTPUT_IDS = {
- 1: -11,
- 2: -12,
- }
- try:
- fileno = out.fileno()
- except AttributeError:
- # If the output stream doesn't have a fileno, it's virtual
- return False
- except io.UnsupportedOperation:
- # Some strange Windows pseudo files?
- return False
- if fileno not in WIN_OUTPUT_IDS:
- return False
- GetStdHandle = compat_ctypes_WINFUNCTYPE(
- ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
- ('GetStdHandle', ctypes.windll.kernel32))
- h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
- WriteConsoleW = compat_ctypes_WINFUNCTYPE(
- ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
- ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
- ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
- written = ctypes.wintypes.DWORD(0)
- GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
- FILE_TYPE_CHAR = 0x0002
- FILE_TYPE_REMOTE = 0x8000
- GetConsoleMode = compat_ctypes_WINFUNCTYPE(
- ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
- ctypes.POINTER(ctypes.wintypes.DWORD))(
- ('GetConsoleMode', ctypes.windll.kernel32))
- INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
- def not_a_console(handle):
- if handle == INVALID_HANDLE_VALUE or handle is None:
- return True
- return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
- or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
- if not_a_console(h):
- return False
- def next_nonbmp_pos(s):
- try:
- return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
- except StopIteration:
- return len(s)
- while s:
- count = min(next_nonbmp_pos(s), 1024)
- ret = WriteConsoleW(
- h, s, count if count else 2, ctypes.byref(written), None)
- if ret == 0:
- raise OSError('Failed to write string')
- if not count: # We just wrote a non-BMP character
- assert written.value == 2
- s = s[1:]
- else:
- assert written.value > 0
- s = s[written.value:]
- return True
- def write_string(s, out=None, encoding=None):
- if out is None:
- out = sys.stderr
- assert isinstance(s, compat_str)
- if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
- if _windows_write_string(s, out):
- return
- if ('b' in getattr(out, 'mode', '')
- or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
- byt = s.encode(encoding or preferredencoding(), 'ignore')
- out.write(byt)
- elif hasattr(out, 'buffer'):
- enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
- byt = s.encode(enc, 'ignore')
- out.buffer.write(byt)
- else:
- out.write(s)
- out.flush()
- def bytes_to_intlist(bs):
- if not bs:
- return []
- if isinstance(bs[0], int): # Python 3
- return list(bs)
- else:
- return [ord(c) for c in bs]
- def intlist_to_bytes(xs):
- if not xs:
- return b''
- return compat_struct_pack('%dB' % len(xs), *xs)
- # Cross-platform file locking
- if sys.platform == 'win32':
- import ctypes.wintypes
- import msvcrt
- class OVERLAPPED(ctypes.Structure):
- _fields_ = [
- ('Internal', ctypes.wintypes.LPVOID),
- ('InternalHigh', ctypes.wintypes.LPVOID),
- ('Offset', ctypes.wintypes.DWORD),
- ('OffsetHigh', ctypes.wintypes.DWORD),
- ('hEvent', ctypes.wintypes.HANDLE),
- ]
- kernel32 = ctypes.windll.kernel32
- LockFileEx = kernel32.LockFileEx
- LockFileEx.argtypes = [
- ctypes.wintypes.HANDLE, # hFile
- ctypes.wintypes.DWORD, # dwFlags
- ctypes.wintypes.DWORD, # dwReserved
- ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
- ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
- ctypes.POINTER(OVERLAPPED) # Overlapped
- ]
- LockFileEx.restype = ctypes.wintypes.BOOL
- UnlockFileEx = kernel32.UnlockFileEx
- UnlockFileEx.argtypes = [
- ctypes.wintypes.HANDLE, # hFile
- ctypes.wintypes.DWORD, # dwReserved
- ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
- ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
- ctypes.POINTER(OVERLAPPED) # Overlapped
- ]
- UnlockFileEx.restype = ctypes.wintypes.BOOL
- whole_low = 0xffffffff
- whole_high = 0x7fffffff
- def _lock_file(f, exclusive):
- overlapped = OVERLAPPED()
- overlapped.Offset = 0
- overlapped.OffsetHigh = 0
- overlapped.hEvent = 0
- f._lock_file_overlapped_p = ctypes.pointer(overlapped)
- handle = msvcrt.get_osfhandle(f.fileno())
- if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
- whole_low, whole_high, f._lock_file_overlapped_p):
- raise OSError('Locking file failed: %r' % ctypes.FormatError())
- def _unlock_file(f):
- assert f._lock_file_overlapped_p
- handle = msvcrt.get_osfhandle(f.fileno())
- if not UnlockFileEx(handle, 0,
- whole_low, whole_high, f._lock_file_overlapped_p):
- raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
- else:
- # Some platforms, such as Jython, are missing fcntl
- try:
- import fcntl
- def _lock_file(f, exclusive):
- fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
- def _unlock_file(f):
- fcntl.flock(f, fcntl.LOCK_UN)
- except ImportError:
- UNSUPPORTED_MSG = 'file locking is not supported on this platform'
- def _lock_file(f, exclusive):
- raise IOError(UNSUPPORTED_MSG)
- def _unlock_file(f):
- raise IOError(UNSUPPORTED_MSG)
- class locked_file(object):
- def __init__(self, filename, mode, encoding=None):
- assert mode in ['r', 'a', 'w']
- self.f = io.open(filename, mode, encoding=encoding)
- self.mode = mode
- def __enter__(self):
- exclusive = self.mode != 'r'
- try:
- _lock_file(self.f, exclusive)
- except IOError:
- self.f.close()
- raise
- return self
- def __exit__(self, etype, value, traceback):
- try:
- _unlock_file(self.f)
- finally:
- self.f.close()
- def __iter__(self):
- return iter(self.f)
- def write(self, *args):
- return self.f.write(*args)
- def read(self, *args):
- return self.f.read(*args)
- def get_filesystem_encoding():
- encoding = sys.getfilesystemencoding()
- return encoding if encoding is not None else 'utf-8'
- def shell_quote(args):
- quoted_args = []
- encoding = get_filesystem_encoding()
- for a in args:
- # We may get a filename encoded with 'encodeFilename'
- a = _decode_compat_str(a, encoding)
- quoted_args.append(compat_shlex_quote(a))
- return ' '.join(quoted_args)
- def smuggle_url(url, data):
- """ Pass additional data in a URL for internal use. """
- url, idata = unsmuggle_url(url, {})
- data.update(idata)
- sdata = compat_urllib_parse_urlencode(
- {'__youtubedl_smuggle': json.dumps(data)})
- return url + '#' + sdata
- def unsmuggle_url(smug_url, default=None):
- if '#__youtubedl_smuggle' not in smug_url:
- return smug_url, default
- url, _, sdata = smug_url.rpartition('#')
- jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
- data = json.loads(jsond)
- return url, data
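- # The two helpers round-trip (editor's sketch, values made up):
- #   url = smuggle_url('http://example.com/v', {'referrer': 'x'})
- #   unsmuggle_url(url)  ->  ('http://example.com/v', {'referrer': 'x'})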
- def format_bytes(bytes):
- if bytes is None:
- return 'N/A'
- if type(bytes) is str:
- bytes = float(bytes)
- if bytes == 0.0:
- exponent = 0
- else:
- exponent = int(math.log(bytes, 1024.0))
- suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
- converted = float(bytes) / float(1024 ** exponent)
- return '%.2f%s' % (converted, suffix)
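- # Editor's sketch:
- #   format_bytes(None)     ->  'N/A'
- #   format_bytes(1536)     ->  '1.50KiB'
- #   format_bytes(1048576)  ->  '1.00MiB'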
- def lookup_unit_table(unit_table, s):
- units_re = '|'.join(re.escape(u) for u in unit_table)
- m = re.match(
- r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
- if not m:
- return None
- num_str = m.group('num').replace(',', '.')
- mult = unit_table[m.group('unit')]
- return int(float(num_str) * mult)
- def parse_filesize(s):
- if s is None:
- return None
- # The lower-case forms are of course incorrect and unofficial,
- # but we support those too
- _UNIT_TABLE = {
- 'B': 1,
- 'b': 1,
- 'bytes': 1,
- 'KiB': 1024,
- 'KB': 1000,
- 'kB': 1024,
- 'Kb': 1000,
- 'kb': 1000,
- 'kilobytes': 1000,
- 'kibibytes': 1024,
- 'MiB': 1024 ** 2,
- 'MB': 1000 ** 2,
- 'mB': 1024 ** 2,
- 'Mb': 1000 ** 2,
- 'mb': 1000 ** 2,
- 'megabytes': 1000 ** 2,
- 'mebibytes': 1024 ** 2,
- 'GiB': 1024 ** 3,
- 'GB': 1000 ** 3,
- 'gB': 1024 ** 3,
- 'Gb': 1000 ** 3,
- 'gb': 1000 ** 3,
- 'gigabytes': 1000 ** 3,
- 'gibibytes': 1024 ** 3,
- 'TiB': 1024 ** 4,
- 'TB': 1000 ** 4,
- 'tB': 1024 ** 4,
- 'Tb': 1000 ** 4,
- 'tb': 1000 ** 4,
- 'terabytes': 1000 ** 4,
- 'tebibytes': 1024 ** 4,
- 'PiB': 1024 ** 5,
- 'PB': 1000 ** 5,
- 'pB': 1024 ** 5,
- 'Pb': 1000 ** 5,
- 'pb': 1000 ** 5,
- 'petabytes': 1000 ** 5,
- 'pebibytes': 1024 ** 5,
- 'EiB': 1024 ** 6,
- 'EB': 1000 ** 6,
- 'eB': 1024 ** 6,
- 'Eb': 1000 ** 6,
- 'eb': 1000 ** 6,
- 'exabytes': 1000 ** 6,
- 'exbibytes': 1024 ** 6,
- 'ZiB': 1024 ** 7,
- 'ZB': 1000 ** 7,
- 'zB': 1024 ** 7,
- 'Zb': 1000 ** 7,
- 'zb': 1000 ** 7,
- 'zettabytes': 1000 ** 7,
- 'zebibytes': 1024 ** 7,
- 'YiB': 1024 ** 8,
- 'YB': 1000 ** 8,
- 'yB': 1024 ** 8,
- 'Yb': 1000 ** 8,
- 'yb': 1000 ** 8,
- 'yottabytes': 1000 ** 8,
- 'yobibytes': 1024 ** 8,
- }
- return lookup_unit_table(_UNIT_TABLE, s)
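- # Editor's sketch:
- #   parse_filesize('5 MiB')  ->  5242880     (binary multiplier)
- #   parse_filesize('5 MB')   ->  5000000     (decimal multiplier)
- #   parse_filesize('1,5GB')  ->  1500000000  (comma accepted as decimal mark)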
- def parse_count(s):
- if s is None:
- return None
- s = s.strip()
- if re.match(r'^[\d,.]+$', s):
- return str_to_int(s)
- _UNIT_TABLE = {
- 'k': 1000,
- 'K': 1000,
- 'm': 1000 ** 2,
- 'M': 1000 ** 2,
- 'kk': 1000 ** 2,
- 'KK': 1000 ** 2,
- }
- return lookup_unit_table(_UNIT_TABLE, s)
- def parse_resolution(s):
- if s is None:
- return {}
- mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
- if mobj:
- return {
- 'width': int(mobj.group('w')),
- 'height': int(mobj.group('h')),
- }
- mobj = re.search(r'\b(\d+)[pPiI]\b', s)
- if mobj:
- return {'height': int(mobj.group(1))}
- mobj = re.search(r'\b([48])[kK]\b', s)
- if mobj:
- return {'height': int(mobj.group(1)) * 540}
- return {}
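- # Editor's sketch:
- #   parse_resolution('1920x1080')  ->  {'width': 1920, 'height': 1080}
- #   parse_resolution('720p')       ->  {'height': 720}
- #   parse_resolution('4K')         ->  {'height': 2160}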
- def parse_bitrate(s):
- s = txt_or_none(s)
- if not s:
- return None
- mobj = re.search(r'\b(\d+)\s*kbps', s)
- if mobj:
- return int(mobj.group(1))
- def month_by_name(name, lang='en'):
- """ Return the number of a month by (locale-independently) English name """
- month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
- try:
- return month_names.index(name) + 1
- except ValueError:
- return None
- def month_by_abbreviation(abbrev):
- """ Return the number of a month by (locale-independently) English
- abbreviations """
- try:
- return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
- except ValueError:
- return None
- def fix_xml_ampersands(xml_str):
- """Replace all the '&' by '&' in XML"""
- return re.sub(
- r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
- '&',
- xml_str)
- def setproctitle(title):
- assert isinstance(title, compat_str)
- # ctypes in Jython is not complete
- # http://bugs.jython.org/issue2148
- if sys.platform.startswith('java'):
- return
- try:
- libc = ctypes.cdll.LoadLibrary('libc.so.6')
- except OSError:
- return
- except TypeError:
- # LoadLibrary in Windows Python 2.7.13 only expects
- # a bytestring, but since unicode_literals turns
- # every string into a unicode string, it fails.
- return
- title_bytes = title.encode('utf-8')
- buf = ctypes.create_string_buffer(len(title_bytes))
- buf.value = title_bytes
- try:
- libc.prctl(15, buf, 0, 0, 0)
- except AttributeError:
- return # Strange libc, just skip this
- def remove_start(s, start):
- return s[len(start):] if s is not None and s.startswith(start) else s
- def remove_end(s, end):
- return s[:-len(end)] if s is not None and s.endswith(end) else s
- def remove_quotes(s):
- if s is None or len(s) < 2:
- return s
- for quote in ('"', "'", ):
- if s[0] == quote and s[-1] == quote:
- return s[1:-1]
- return s
- def url_basename(url):
- path = compat_urllib_parse.urlparse(url).path
- return path.strip('/').split('/')[-1]
- def base_url(url):
- return re.match(r'https?://[^?#&]+/', url).group()
- def urljoin(base, path):
- path = _decode_compat_str(path, encoding='utf-8', or_none=True)
- if not path:
- return None
- if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
- return path
- base = _decode_compat_str(base, encoding='utf-8', or_none=True)
- if not base:
- return None
- return (
- re.match(r'^(?:https?:)?//', base)
- and compat_urllib_parse.urljoin(base, path))
- class HEADRequest(compat_urllib_request.Request):
- def get_method(self):
- return 'HEAD'
- class PUTRequest(compat_urllib_request.Request):
- def get_method(self):
- return 'PUT'
- def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1, base=None):
- if get_attr:
- if v is not None:
- v = getattr(v, get_attr, None)
- if v in (None, ''):
- return default
- try:
- # like int, raise if base is specified and v is not a string
- return (int(v) if base is None else int(v, base=base)) * invscale // scale
- except (ValueError, TypeError, OverflowError):
- return default
- def str_or_none(v, default=None):
- return default if v is None else compat_str(v)
- def str_to_int(int_str):
- """ A more relaxed version of int_or_none """
- if isinstance(int_str, compat_integer_types):
- return int_str
- elif isinstance(int_str, compat_str):
- int_str = re.sub(r'[,\.\+]', '', int_str)
- return int_or_none(int_str)
- def float_or_none(v, scale=1, invscale=1, default=None):
- if v is None:
- return default
- try:
- return float(v) * invscale / scale
- except (ValueError, TypeError):
- return default
- def bool_or_none(v, default=None):
- return v if isinstance(v, bool) else default
- def strip_or_none(v, default=None):
- return v.strip() if isinstance(v, compat_str) else default
- def txt_or_none(v, default=None):
- """ Combine str/strip_or_none, disallow blank value (for traverse_obj) """
- return default if v is None else (compat_str(v).strip() or default)
- def url_or_none(url):
- if not url or not isinstance(url, compat_str):
- return None
- url = url.strip()
- return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
- def parse_duration(s):
- if not isinstance(s, compat_basestring):
- return None
- s = s.strip()
- days, hours, mins, secs, ms = [None] * 5
- m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
- if m:
- days, hours, mins, secs, ms = m.groups()
- else:
- m = re.match(
- r'''(?ix)(?:P?
- (?:
- [0-9]+\s*y(?:ears?)?\s*
- )?
- (?:
- [0-9]+\s*m(?:onths?)?\s*
- )?
- (?:
- [0-9]+\s*w(?:eeks?)?\s*
- )?
- (?:
- (?P<days>[0-9]+)\s*d(?:ays?)?\s*
- )?
- T)?
- (?:
- (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
- )?
- (?:
- (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
- )?
- (?:
- (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
- )?Z?$''', s)
- if m:
- days, hours, mins, secs, ms = m.groups()
- else:
- m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
- if m:
- hours, mins = m.groups()
- else:
- return None
- duration = 0
- if secs:
- duration += float(secs)
- if mins:
- duration += float(mins) * 60
- if hours:
- duration += float(hours) * 60 * 60
- if days:
- duration += float(days) * 24 * 60 * 60
- if ms:
- duration += float(ms)
- return duration
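- # Editor's sketch:
- #   parse_duration('1:23:45')  ->  5025.0
- #   parse_duration('PT1H30M')  ->  5400.0
- #   parse_duration('23.5s')    ->  23.5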
- def prepend_extension(filename, ext, expected_real_ext=None):
- name, real_ext = os.path.splitext(filename)
- return (
- '{0}.{1}{2}'.format(name, ext, real_ext)
- if not expected_real_ext or real_ext[1:] == expected_real_ext
- else '{0}.{1}'.format(filename, ext))
- def replace_extension(filename, ext, expected_real_ext=None):
- name, real_ext = os.path.splitext(filename)
- return '{0}.{1}'.format(
- name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
- ext)
- def check_executable(exe, args=[]):
- """ Checks if the given binary is installed somewhere in PATH, and returns its name.
- args can be a list of arguments for a short output (like -version) """
- try:
- process_communicate_or_kill(subprocess.Popen(
- [exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
- except OSError:
- return False
- return exe
- def get_exe_version(exe, args=['--version'],
- version_re=None, unrecognized='present'):
- """ Returns the version of the specified executable,
- or False if the executable is not present """
- try:
- # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
- # SIGTTOU if youtube-dl is run in the background.
- # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
- out, _ = process_communicate_or_kill(subprocess.Popen(
- [encodeArgument(exe)] + args,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
- except OSError:
- return False
- out = _decode_compat_str(out, 'ascii', 'ignore')
- return detect_exe_version(out, version_re, unrecognized)
- def detect_exe_version(output, version_re=None, unrecognized='present'):
- assert isinstance(output, compat_str)
- if version_re is None:
- version_re = r'version\s+([-0-9._a-zA-Z]+)'
- m = re.search(version_re, output)
- if m:
- return m.group(1)
- else:
- return unrecognized
- class LazyList(compat_collections_abc.Iterable):
- """Lazy immutable list from an iterable
- Note that slices of a LazyList are lists and not LazyList"""
- class IndexError(IndexError):
- def __init__(self, cause=None):
- if cause:
- # reproduce `raise from`
- self.__cause__ = cause
- super(IndexError, self).__init__()
- def __init__(self, iterable, **kwargs):
- # kwarg-only
- reverse = kwargs.get('reverse', False)
- _cache = kwargs.get('_cache')
- self._iterable = iter(iterable)
- self._cache = [] if _cache is None else _cache
- self._reversed = reverse
- def __iter__(self):
- if self._reversed:
- # We need to consume the entire iterable to iterate in reverse
- for item in self.exhaust():
- yield item
- return
- for item in self._cache:
- yield item
- for item in self._iterable:
- self._cache.append(item)
- yield item
- def _exhaust(self):
- self._cache.extend(self._iterable)
- self._iterable = [] # Discard the emptied iterable to make it pickle-able
- return self._cache
- def exhaust(self):
- """Evaluate the entire iterable"""
- return self._exhaust()[::-1 if self._reversed else 1]
- @staticmethod
- def _reverse_index(x):
- return None if x is None else ~x
- def __getitem__(self, idx):
- if isinstance(idx, slice):
- if self._reversed:
- idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
- start, stop, step = idx.start, idx.stop, idx.step or 1
- elif isinstance(idx, int):
- if self._reversed:
- idx = self._reverse_index(idx)
- start, stop, step = idx, idx, 0
- else:
- raise TypeError('indices must be integers or slices')
- if ((start or 0) < 0 or (stop or 0) < 0
- or (start is None and step < 0)
- or (stop is None and step > 0)):
- # We need to consume the entire iterable to be able to slice from the end
- # Obviously, never use this with infinite iterables
- self._exhaust()
- try:
- return self._cache[idx]
- except IndexError as e:
- raise self.IndexError(e)
- n = max(start or 0, stop or 0) - len(self._cache) + 1
- if n > 0:
- self._cache.extend(itertools.islice(self._iterable, n))
- try:
- return self._cache[idx]
- except IndexError as e:
- raise self.IndexError(e)
- def __bool__(self):
- try:
- self[-1] if self._reversed else self[0]
- except self.IndexError:
- return False
- return True
- def __len__(self):
- self._exhaust()
- return len(self._cache)
- def __reversed__(self):
- return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
- def __copy__(self):
- return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
- def __repr__(self):
- # repr and str should mimic a list. So we exhaust the iterable
- return repr(self.exhaust())
- def __str__(self):
- return repr(self.exhaust())
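- # Editor's sketch:
- #   ll = LazyList(itertools.count())
- #   ll[3]  ->  3  (only the first four items are consumed)
- #   list(LazyList([1, 2, 3], reverse=True))  ->  [3, 2, 1]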
- class PagedList(object):
- def __len__(self):
- # This is only useful for tests
- return len(self.getslice())
- class OnDemandPagedList(PagedList):
- def __init__(self, pagefunc, pagesize, use_cache=True):
- self._pagefunc = pagefunc
- self._pagesize = pagesize
- self._use_cache = use_cache
- if use_cache:
- self._cache = {}
- def getslice(self, start=0, end=None):
- res = []
- for pagenum in itertools.count(start // self._pagesize):
- firstid = pagenum * self._pagesize
- nextfirstid = pagenum * self._pagesize + self._pagesize
- if start >= nextfirstid:
- continue
- page_results = None
- if self._use_cache:
- page_results = self._cache.get(pagenum)
- if page_results is None:
- page_results = list(self._pagefunc(pagenum))
- if self._use_cache:
- self._cache[pagenum] = page_results
- startv = (
- start % self._pagesize
- if firstid <= start < nextfirstid
- else 0)
- endv = (
- ((end - 1) % self._pagesize) + 1
- if (end is not None and firstid <= end <= nextfirstid)
- else None)
- if startv != 0 or endv is not None:
- page_results = page_results[startv:endv]
- res.extend(page_results)
- # A little optimization: if the current page is not "full", i.e. does
- # not contain page_size videos, then we can assume that this page
- # is the last one - there are no more ids on further pages -
- # so there is no need to query again.
- if len(page_results) + startv < self._pagesize:
- break
- # If we got the whole page, but the next page is not interesting,
- # break out early as well
- if end == nextfirstid:
- break
- return res
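- # Editor's sketch (the page function below is made up):
- #   pages = OnDemandPagedList(lambda n: iter(range(n * 3, n * 3 + 3)), 3)
- #   pages.getslice(2, 7)  ->  [2, 3, 4, 5, 6]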
- class InAdvancePagedList(PagedList):
- def __init__(self, pagefunc, pagecount, pagesize):
- self._pagefunc = pagefunc
- self._pagecount = pagecount
- self._pagesize = pagesize
- def getslice(self, start=0, end=None):
- res = []
- start_page = start // self._pagesize
- end_page = (
- self._pagecount if end is None else (end // self._pagesize + 1))
- skip_elems = start - start_page * self._pagesize
- only_more = None if end is None else end - start
- for pagenum in range(start_page, end_page):
- page = list(self._pagefunc(pagenum))
- if skip_elems:
- page = page[skip_elems:]
- skip_elems = None
- if only_more is not None:
- if len(page) < only_more:
- only_more -= len(page)
- else:
- page = page[:only_more]
- res.extend(page)
- break
- res.extend(page)
- return res
- def uppercase_escape(s):
- unicode_escape = codecs.getdecoder('unicode_escape')
- return re.sub(
- r'\\U[0-9a-fA-F]{8}',
- lambda m: unicode_escape(m.group(0))[0],
- s)
- def lowercase_escape(s):
- unicode_escape = codecs.getdecoder('unicode_escape')
- return re.sub(
- r'\\u[0-9a-fA-F]{4}',
- lambda m: unicode_escape(m.group(0))[0],
- s)
- def escape_rfc3986(s):
- """Escape non-ASCII characters as suggested by RFC 3986"""
- if sys.version_info < (3, 0):
- s = _encode_compat_str(s, 'utf-8')
- # ensure unicode: after quoting, it can always be converted
- return compat_str(compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]"))
- def escape_url(url):
- """Escape URL as suggested by RFC 3986"""
- url_parsed = compat_urllib_parse_urlparse(url)
- return url_parsed._replace(
- netloc=url_parsed.netloc.encode('idna').decode('ascii'),
- path=escape_rfc3986(url_parsed.path),
- params=escape_rfc3986(url_parsed.params),
- query=escape_rfc3986(url_parsed.query),
- fragment=escape_rfc3986(url_parsed.fragment)
- ).geturl()
- def parse_qs(url, **kwargs):
- return compat_parse_qs(compat_urllib_parse.urlparse(url).query, **kwargs)
- def read_batch_urls(batch_fd):
- def fixup(url):
- url = _decode_compat_str(url, 'utf-8', 'replace')
- BOM_UTF8 = '\xef\xbb\xbf'
- if url.startswith(BOM_UTF8):
- url = url[len(BOM_UTF8):]
- url = url.strip()
- if url.startswith(('#', ';', ']')):
- return False
- return url
- with contextlib.closing(batch_fd) as fd:
- return [url for url in map(fixup, fd) if url]
- def urlencode_postdata(*args, **kargs):
- return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
- def update_url(url, **kwargs):
- """Replace URL components specified by kwargs
- url: compat_str or parsed URL tuple
- if query_update is in kwargs, update query with
- its value instead of replacing (overrides any `query`)
- NB: query_update expects parse_qs() format: [key: value_list, ...]
- returns: compat_str
- """
- if not kwargs:
- return compat_urllib_parse.urlunparse(url) if isinstance(url, tuple) else url
- if not isinstance(url, tuple):
- url = compat_urllib_parse.urlparse(url)
- query = kwargs.pop('query_update', None)
- if query:
- qs = compat_parse_qs(url.query)
- qs.update(query)
- kwargs['query'] = compat_urllib_parse_urlencode(qs, True)
- kwargs = compat_kwargs(kwargs)
- return compat_urllib_parse.urlunparse(url._replace(**kwargs))
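- # Editor's sketch (query parameter order may vary on older Pythons):
- #   update_url('http://e.com/a?x=1', path='/b')  ->  'http://e.com/b?x=1'
- #   update_url('http://e.com/a?x=1', query_update={'y': ['2']})  ->  'http://e.com/a?x=1&y=2'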
- def update_url_query(url, query):
- return update_url(url, query_update=query)
- def update_Request(req, url=None, data=None, headers={}, query={}):
- req_headers = req.headers.copy()
- req_headers.update(headers)
- req_data = data if data is not None else req.data
- req_url = update_url_query(url or req.get_full_url(), query)
- req_type = {'HEAD': HEADRequest, 'PUT': PUTRequest}.get(
- req.get_method(), compat_urllib_request.Request)
- new_req = req_type(
- req_url, data=req_data, headers=req_headers,
- origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
- if hasattr(req, 'timeout'):
- new_req.timeout = req.timeout
- return new_req
- def _multipart_encode_impl(data, boundary):
- content_type = 'multipart/form-data; boundary=%s' % boundary
- out = b''
- for k, v in data.items():
- out += b'--' + boundary.encode('ascii') + b'\r\n'
- k = _encode_compat_str(k, 'utf-8')
- v = _encode_compat_str(v, 'utf-8')
- # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
- # suggests sending UTF-8 directly. Firefox sends UTF-8, too
- content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
- if boundary.encode('ascii') in content:
- raise ValueError('Boundary overlaps with data')
- out += content
- out += b'--' + boundary.encode('ascii') + b'--\r\n'
- return out, content_type
- def multipart_encode(data, boundary=None):
- '''
- Encode a dict to RFC 7578-compliant form-data
- data:
- A dict where keys and values can be either Unicode or bytes-like
- objects.
- boundary:
- If specified, a Unicode object to be used as the boundary. Otherwise
- a random boundary is generated.
- Reference: https://tools.ietf.org/html/rfc7578
- '''
- has_specified_boundary = boundary is not None
- while True:
- if boundary is None:
- boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
- try:
- out, content_type = _multipart_encode_impl(data, boundary)
- break
- except ValueError:
- if has_specified_boundary:
- raise
- boundary = None
- return out, content_type
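- # Editor's sketch (boundary chosen manually so the output is predictable):
- #   out, ctype = multipart_encode({'field': 'value'}, boundary='XXX')
- #   ctype  ->  'multipart/form-data; boundary=XXX'
- #   out    ->  b'--XXX\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--XXX--\r\n'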
- def is_iterable_like(x, allowed_types=compat_collections_abc.Iterable, blocked_types=NO_DEFAULT):
- if blocked_types is NO_DEFAULT:
- blocked_types = (compat_str, bytes, compat_collections_abc.Mapping)
- return isinstance(x, allowed_types) and not isinstance(x, blocked_types)
- def variadic(x, allowed_types=NO_DEFAULT):
- if isinstance(allowed_types, compat_collections_abc.Iterable):
- allowed_types = tuple(allowed_types)
- return x if is_iterable_like(x, blocked_types=allowed_types) else (x,)
- def dict_get(d, key_or_keys, default=None, skip_false_values=True):
- exp = (lambda x: x or None) if skip_false_values else IDENTITY
- return traverse_obj(d, *variadic(key_or_keys), expected_type=exp,
- default=default, get_all=False)
- def try_call(*funcs, **kwargs):
- # parameter defaults
- expected_type = kwargs.get('expected_type')
- fargs = kwargs.get('args', [])
- fkwargs = kwargs.get('kwargs', {})
- for f in funcs:
- try:
- val = f(*fargs, **fkwargs)
- except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError):
- pass
- else:
- if expected_type is None or isinstance(val, expected_type):
- return val
- def try_get(src, getter, expected_type=None):
- if not isinstance(getter, (list, tuple)):
- getter = [getter]
- for get in getter:
- try:
- v = get(src)
- except (AttributeError, KeyError, TypeError, IndexError):
- pass
- else:
- if expected_type is None or isinstance(v, expected_type):
- return v
- def filter_dict(dct, cndn=lambda _, v: v is not None):
- # NB: don't use dict comprehension for python 2.6 compatibility
- return dict((k, v) for k, v in dct.items() if cndn(k, v))
- def merge_dicts(*dicts, **kwargs):
- """
- Merge the `dict`s in `dicts` using the first valid value for each key.
- Normally valid: not None and not an empty string
- Keyword-only args:
- unblank: allow empty string if False (default True)
- rev: merge dicts in reverse order (default False)
- merge_dicts(dct1, dct2, ..., unblank=False, rev=True)
- matches {**dct1, **dct2, ...}
- However, merge_dicts(dct1, dct2, ..., rev=True) may often be better.
- """
- unblank = kwargs.get('unblank', True)
- rev = kwargs.get('rev', False)
- if unblank:
- def can_merge_str(k, v, to_dict):
- return (isinstance(v, compat_str) and v
- and isinstance(to_dict[k], compat_str)
- and not to_dict[k])
- else:
- can_merge_str = lambda k, v, to_dict: False
- merged = {}
- for a_dict in reversed(dicts) if rev else dicts:
- for k, v in a_dict.items():
- if v is None:
- continue
- if (k not in merged) or can_merge_str(k, v, merged):
- merged[k] = v
- return merged
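- # Editor's sketch: None never merges, and by default an empty string only
- # loses to a later non-empty string:
- #   merge_dicts({'a': 1, 'b': ''}, {'a': 2, 'b': 'x'})  ->  {'a': 1, 'b': 'x'}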
- # very poor choice of name, as if Python string encodings weren't confusing enough
- def encode_compat_str(s, encoding=preferredencoding(), errors='strict'):
- assert isinstance(s, compat_basestring)
- return s if isinstance(s, compat_str) else compat_str(s, encoding, errors)
- # what it could have been
- def _decode_compat_str(s, encoding=preferredencoding(), errors='strict', or_none=False):
- if not or_none:
- assert isinstance(s, compat_basestring)
- return (
- s if isinstance(s, compat_str)
- else compat_str(s, encoding, errors) if isinstance(s, compat_basestring)
- else None)
- # the real encode_compat_str, but only for internal use
- def _encode_compat_str(s, encoding=preferredencoding(), errors='strict'):
- assert isinstance(s, compat_basestring)
- return s.encode(encoding, errors) if isinstance(s, compat_str) else s
- US_RATINGS = {
- 'G': 0,
- 'PG': 10,
- 'PG-13': 13,
- 'R': 16,
- 'NC': 18,
- }
- TV_PARENTAL_GUIDELINES = {
- 'TV-Y': 0,
- 'TV-Y7': 7,
- 'TV-G': 0,
- 'TV-PG': 0,
- 'TV-14': 14,
- 'TV-MA': 17,
- }
- def parse_age_limit(s):
- if not isinstance(s, bool):
- age = int_or_none(s)
- if age is not None:
- return age if 0 <= age <= 21 else None
- if not isinstance(s, compat_basestring):
- return None
- m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
- if m:
- return int(m.group('age'))
- if s in US_RATINGS:
- return US_RATINGS[s]
- m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
- if m:
- return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
- return None
- def strip_jsonp(code):
- return re.sub(
- r'''(?sx)^
- (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
- (?:\s*&&\s*(?P=func_name))?
- \s*\(\s*(?P<callback_data>.*)\);?
- \s*?(?://[^\n]*)*$''',
- r'\g<callback_data>', code)
- def js_to_json(code, *args, **kwargs):
- # vars is a dict of (var, val) pairs to substitute
- vars = args[0] if len(args) > 0 else kwargs.get('vars', {})
- strict = kwargs.get('strict', False)
- STRING_QUOTES = '\'"`'
- STRING_RE = '|'.join(r'{0}(?:\\.|[^\\{0}])*{0}'.format(q) for q in STRING_QUOTES)
- COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
- SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
- INTEGER_TABLE = (
- (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
- (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
- (r'(?s)^(\d+){skip}:?$'.format(skip=SKIP_RE), 10),
- )
- # compat candidate
- JSONDecodeError = json.JSONDecodeError if 'JSONDecodeError' in dir(json) else ValueError
- def process_escape(match):
- JSON_PASSTHROUGH_ESCAPES = r'"\bfnrtu'
- escape = match.group(1) or match.group(2)
- return ('\\' + escape if escape in JSON_PASSTHROUGH_ESCAPES
- else '\\u00' if escape == 'x'
- else '' if escape == '\n'
- else escape)
- def template_substitute(match):
- evaluated = js_to_json(match.group(1), vars, strict=strict)
- if evaluated[0] == '"':
- return json.loads(evaluated)
- return evaluated
- def fix_kv(m):
- v = m.group(0)
- if v in ('true', 'false', 'null'):
- return v
- elif v in ('undefined', 'void 0'):
- return 'null'
- elif v.startswith('/*') or v.startswith('//') or v == ',':
- return ''
- if v[0] in STRING_QUOTES:
- v = re.sub(r'(?s)\${([^}]+)}', template_substitute, v[1:-1]) if v[0] == '`' else v[1:-1]
- escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v)
- return '"{0}"'.format(escaped)
- inv = IDENTITY
- im = re.split(r'^!+', v)
- if len(im) > 1 and not im[-1].endswith(':'):
- if (len(v) - len(im[1])) % 2 == 1:
- inv = lambda x: 'true' if x == 0 else 'false'
- else:
- inv = lambda x: 'false' if x == 0 else 'true'
- if not any(x for x in im):
- return
- v = im[-1]
- for regex, base in INTEGER_TABLE:
- im = re.match(regex, v)
- if im:
- i = int(im.group(1), base)
- return ('"%s":' if v.endswith(':') else '%s') % inv(i)
- if v in vars:
- try:
- if not strict:
- json.loads(vars[v])
- except JSONDecodeError:
- return inv(json.dumps(vars[v]))
- else:
- return inv(vars[v])
- if not strict:
- v = try_call(inv, args=(v,), default=v)
- if v in ('true', 'false'):
- return v
- return '"{0}"'.format(v)
- raise ValueError('Unknown value: ' + v)
- def create_map(mobj):
- return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
- code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
- if not strict:
- code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
- code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code)
- code = re.sub(r'parseInt\([^\d]+(\d+)[^\d]+\)', r'\1', code)
- code = re.sub(r'\(function\([^)]*\)\s*\{[^}]*\}\s*\)\s*\(\s*(["\'][^)]*["\'])\s*\)', r'\1', code)
- return re.sub(r'''(?sx)
- {str_}|
- {comment}|
- ,(?={skip}[\]}}])|
- void\s0|
- !*(?:(?<!\d)[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
- (?:\b|!+)0(?:[xX][\da-fA-F]+|[0-7]+)(?:{skip}:)?|
- !+\d+(?:\.\d*)?(?:{skip}:)?|
- [0-9]+(?:{skip}:)|
- !+
- '''.format(comment=COMMENT_RE, skip=SKIP_RE, str_=STRING_RE), fix_kv, code)
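- # Illustrative usage (editor's sketch): bare keys and single-quoted strings
- # are rewritten so the result parses as JSON, and trailing commas are dropped:
- # >>> js_to_json("{abc: 'def', key: 1}")
- # '{"abc": "def", "key": 1}'
- # >>> js_to_json('["a",]')
- # '["a"]'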
- def qualities(quality_ids):
- """ Get a numeric quality value out of a list of possible values """
- def q(qid):
- try:
- return quality_ids.index(qid)
- except ValueError:
- return -1
- return q
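- # Illustrative usage (editor's sketch): the index in the preference list is
- # the quality score; unknown IDs rank lowest:
- # >>> q = qualities(['240p', '360p', '720p'])
- # >>> q('720p'), q('unknown')
- # (2, -1)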
- DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
- def limit_length(s, length):
- """ Add ellipses to overly long strings """
- if s is None:
- return None
- ELLIPSES = '...'
- if len(s) > length:
- return s[:length - len(ELLIPSES)] + ELLIPSES
- return s
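- # Illustrative usage (editor's sketch; the result never exceeds `length`):
- # >>> limit_length('foo bar baz', 10)
- # 'foo bar...'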
- def version_tuple(v):
- return tuple(int(e) for e in re.split(r'[-.]', v))
- def is_outdated_version(version, limit, assume_new=True):
- if not version:
- return not assume_new
- try:
- return version_tuple(version) < version_tuple(limit)
- except ValueError:
- return not assume_new
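- # Illustrative usage (editor's sketch): versions compare as numeric tuples,
- # and unparseable input falls back to `assume_new`:
- # >>> is_outdated_version('2020.01.01', '2021.01.01')
- # True
- # >>> is_outdated_version('garbage', '2021.01.01', assume_new=False)
- # True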
- def ytdl_is_updateable():
- """ Returns if youtube-dl can be updated with -U """
- from zipimport import zipimporter
- return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
- def args_to_str(args):
- # Get a short string representation for a subprocess command
- return ' '.join(compat_shlex_quote(a) for a in args)
- def error_to_compat_str(err):
- return _decode_compat_str(str(err))
- def mimetype2ext(mt):
- if mt is None:
- return None
- ext = {
- 'audio/mp4': 'm4a',
- # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
- # it's the most popular one
- 'audio/mpeg': 'mp3',
- }.get(mt)
- if ext is not None:
- return ext
- _, _, res = mt.rpartition('/')
- res = res.split(';')[0].strip().lower()
- return {
- '3gpp': '3gp',
- 'smptett+xml': 'tt',
- 'ttaf+xml': 'dfxp',
- 'ttml+xml': 'ttml',
- 'x-flv': 'flv',
- 'x-mp4-fragmented': 'mp4',
- 'x-ms-sami': 'sami',
- 'x-ms-wmv': 'wmv',
- 'mpegurl': 'm3u8',
- 'x-mpegurl': 'm3u8',
- 'vnd.apple.mpegurl': 'm3u8',
- 'dash+xml': 'mpd',
- 'f4m+xml': 'f4m',
- 'hds+xml': 'f4m',
- 'vnd.ms-sstr+xml': 'ism',
- 'quicktime': 'mov',
- 'mp2t': 'ts',
- 'x-wav': 'wav',
- }.get(res, res)
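- # Illustrative usage (editor's sketch): parameters after ';' are ignored and
- # unmapped subtypes pass through unchanged:
- # >>> mimetype2ext('application/x-mpegURL; charset=utf-8')
- # 'm3u8'
- # >>> mimetype2ext('video/mp4')
- # 'mp4'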
- def parse_codecs(codecs_str):
- # http://tools.ietf.org/html/rfc6381
- if not codecs_str:
- return {}
- split_codecs = list(filter(None, map(
- lambda s: s.strip(), codecs_str.strip().strip(',').split(','))))
- vcodec, acodec = None, None
- for full_codec in split_codecs:
- codec = full_codec.split('.')[0]
- if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora'):
- if not vcodec:
- vcodec = full_codec
- elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
- if not acodec:
- acodec = full_codec
- else:
- write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
- if not vcodec and not acodec:
- if len(split_codecs) == 2:
- return {
- 'vcodec': split_codecs[0],
- 'acodec': split_codecs[1],
- }
- else:
- return {
- 'vcodec': vcodec or 'none',
- 'acodec': acodec or 'none',
- }
- return {}
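- # Illustrative usage (editor's sketch of the RFC 6381 split):
- # >>> parse_codecs('avc1.42E01E, mp4a.40.2')
- # {'vcodec': 'avc1.42E01E', 'acodec': 'mp4a.40.2'}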
- def urlhandle_detect_ext(url_handle):
- getheader = url_handle.headers.get
- cd = getheader('Content-Disposition')
- if cd:
- m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
- if m:
- e = determine_ext(m.group('filename'), default_ext=None)
- if e:
- return e
- return mimetype2ext(getheader('Content-Type'))
- def encode_data_uri(data, mime_type):
- return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
- def age_restricted(content_limit, age_limit):
- """ Returns True iff the content should be blocked """
- if age_limit is None: # No limit set
- return False
- if content_limit is None:
- return False # Content available for everyone
- return age_limit < content_limit
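- # Illustrative usage (editor's sketch; note the argument order):
- # >>> age_restricted(18, 12)  # 18+ content, viewer limited to 12
- # True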
- def is_html(first_bytes):
- """ Detect whether a file contains HTML by examining its first bytes. """
- BOMS = [
- (b'\xef\xbb\xbf', 'utf-8'),
- (b'\x00\x00\xfe\xff', 'utf-32-be'),
- (b'\xff\xfe\x00\x00', 'utf-32-le'),
- (b'\xff\xfe', 'utf-16-le'),
- (b'\xfe\xff', 'utf-16-be'),
- ]
- for bom, enc in BOMS:
- if first_bytes.startswith(bom):
- s = first_bytes[len(bom):].decode(enc, 'replace')
- break
- else:
- s = first_bytes.decode('utf-8', 'replace')
- return re.match(r'^\s*<', s)
- def determine_protocol(info_dict):
- protocol = info_dict.get('protocol')
- if protocol is not None:
- return protocol
- url = info_dict['url']
- if url.startswith('rtmp'):
- return 'rtmp'
- elif url.startswith('mms'):
- return 'mms'
- elif url.startswith('rtsp'):
- return 'rtsp'
- ext = determine_ext(url)
- if ext == 'm3u8':
- return 'm3u8'
- elif ext == 'f4m':
- return 'f4m'
- return compat_urllib_parse_urlparse(url).scheme
- def render_table(header_row, data):
- """ Render a list of rows, each as a list of values """
- table = [header_row] + data
- max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
- format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
- return '\n'.join(format_str % tuple(row) for row in table)
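- # Illustrative usage (editor's sketch): columns are left-aligned and padded
- # to the widest cell plus one space:
- # >>> print(render_table(['format', 'note'], [['mp4', '720p'], ['webm', '480p']]))
- # format note
- # mp4    720p
- # webm   480p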
- def _match_one(filter_part, dct):
- COMPARISON_OPERATORS = {
- '<': operator.lt,
- '<=': operator.le,
- '>': operator.gt,
- '>=': operator.ge,
- '=': operator.eq,
- '!=': operator.ne,
- }
- operator_rex = re.compile(r'''(?x)\s*
- (?P<key>[a-z_]+)
- \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
- (?:
- (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
- (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
- (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
- )
- \s*$
- ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
- m = operator_rex.search(filter_part)
- if m:
- op = COMPARISON_OPERATORS[m.group('op')]
- actual_value = dct.get(m.group('key'))
- if (m.group('quotedstrval') is not None
- or m.group('strval') is not None
- # If the original field is a string and the matching comparison value is
- # a number we should respect the origin of the original field
- # and process comparison value as a string (see
- # https://github.com/ytdl-org/youtube-dl/issues/11082).
- or actual_value is not None and m.group('intval') is not None
- and isinstance(actual_value, compat_str)):
- if m.group('op') not in ('=', '!='):
- raise ValueError(
- 'Operator %s does not support string values!' % m.group('op'))
- comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
- quote = m.group('quote')
- if quote is not None:
- comparison_value = comparison_value.replace(r'\%s' % quote, quote)
- else:
- try:
- comparison_value = int(m.group('intval'))
- except ValueError:
- comparison_value = parse_filesize(m.group('intval'))
- if comparison_value is None:
- comparison_value = parse_filesize(m.group('intval') + 'B')
- if comparison_value is None:
- raise ValueError(
- 'Invalid integer value %r in filter part %r' % (
- m.group('intval'), filter_part))
- if actual_value is None:
- return m.group('none_inclusive')
- return op(actual_value, comparison_value)
- UNARY_OPERATORS = {
- '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
- '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
- }
- operator_rex = re.compile(r'''(?x)\s*
- (?P<op>%s)\s*(?P<key>[a-z_]+)
- \s*$
- ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
- m = operator_rex.search(filter_part)
- if m:
- op = UNARY_OPERATORS[m.group('op')]
- actual_value = dct.get(m.group('key'))
- return op(actual_value)
- raise ValueError('Invalid filter part %r' % filter_part)
- def match_str(filter_str, dct):
- """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
- return all(
- _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
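- # Illustrative usage (editor's sketch of the filter syntax): '&' joins
- # clauses and '?' makes a comparison pass when the field is missing:
- # >>> match_str('like_count > 100 & dislike_count <? 50', {'like_count': 190})
- # True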
- def match_filter_func(filter_str):
- def _match_func(info_dict):
- if match_str(filter_str, info_dict):
- return None
- else:
- video_title = info_dict.get('title', info_dict.get('id', 'video'))
- return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
- return _match_func
- def parse_dfxp_time_expr(time_expr):
- if not time_expr:
- return
- mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
- if mobj:
- return float(mobj.group('time_offset'))
- mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
- if mobj:
- return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
- def srt_subtitles_timecode(seconds):
- return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
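- # Illustrative usage (editor's sketch):
- # >>> srt_subtitles_timecode(3661.5)
- # '01:01:01,500'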
- def dfxp2srt(dfxp_data):
- '''
- @param dfxp_data A bytes-like object containing DFXP data
- @returns A unicode object containing converted SRT data
- '''
- LEGACY_NAMESPACES = (
- (b'http://www.w3.org/ns/ttml', [
- b'http://www.w3.org/2004/11/ttaf1',
- b'http://www.w3.org/2006/04/ttaf1',
- b'http://www.w3.org/2006/10/ttaf1',
- ]),
- (b'http://www.w3.org/ns/ttml#styling', [
- b'http://www.w3.org/ns/ttml#style',
- ]),
- )
- SUPPORTED_STYLING = [
- 'color',
- 'fontFamily',
- 'fontSize',
- 'fontStyle',
- 'fontWeight',
- 'textDecoration'
- ]
- _x = functools.partial(xpath_with_ns, ns_map={
- 'xml': 'http://www.w3.org/XML/1998/namespace',
- 'ttml': 'http://www.w3.org/ns/ttml',
- 'tts': 'http://www.w3.org/ns/ttml#styling',
- })
- styles = {}
- default_style = {}
- class TTMLPElementParser(object):
- def __init__(self):
- # per-instance state: class-level lists would be shared between
- # parser instances
- self._out = ''
- self._unclosed_elements = []
- self._applied_styles = []
- def start(self, tag, attrib):
- if tag in (_x('ttml:br'), 'br'):
- self._out += '\n'
- else:
- unclosed_elements = []
- style = {}
- element_style_id = attrib.get('style')
- if default_style:
- style.update(default_style)
- if element_style_id:
- style.update(styles.get(element_style_id, {}))
- for prop in SUPPORTED_STYLING:
- prop_val = attrib.get(_x('tts:' + prop))
- if prop_val:
- style[prop] = prop_val
- if style:
- font = ''
- for k, v in sorted(style.items()):
- if self._applied_styles and self._applied_styles[-1].get(k) == v:
- continue
- if k == 'color':
- font += ' color="%s"' % v
- elif k == 'fontSize':
- font += ' size="%s"' % v
- elif k == 'fontFamily':
- font += ' face="%s"' % v
- elif k == 'fontWeight' and v == 'bold':
- self._out += '<b>'
- unclosed_elements.append('b')
- elif k == 'fontStyle' and v == 'italic':
- self._out += '<i>'
- unclosed_elements.append('i')
- elif k == 'textDecoration' and v == 'underline':
- self._out += '<u>'
- unclosed_elements.append('u')
- if font:
- self._out += '<font' + font + '>'
- unclosed_elements.append('font')
- applied_style = {}
- if self._applied_styles:
- applied_style.update(self._applied_styles[-1])
- applied_style.update(style)
- self._applied_styles.append(applied_style)
- self._unclosed_elements.append(unclosed_elements)
- def end(self, tag):
- if tag not in (_x('ttml:br'), 'br'):
- unclosed_elements = self._unclosed_elements.pop()
- for element in reversed(unclosed_elements):
- self._out += '</%s>' % element
- if unclosed_elements and self._applied_styles:
- self._applied_styles.pop()
- def data(self, data):
- self._out += data
- def close(self):
- return self._out.strip()
- def parse_node(node):
- target = TTMLPElementParser()
- parser = xml.etree.ElementTree.XMLParser(target=target)
- parser.feed(xml.etree.ElementTree.tostring(node))
- return parser.close()
- for k, v in LEGACY_NAMESPACES:
- for ns in v:
- dfxp_data = dfxp_data.replace(ns, k)
- dfxp = compat_etree_fromstring(dfxp_data)
- out = []
- paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
- if not paras:
- raise ValueError('Invalid dfxp/TTML subtitle')
- repeat = False
- while True:
- for style in dfxp.findall(_x('.//ttml:style')):
- style_id = style.get('id') or style.get(_x('xml:id'))
- if not style_id:
- continue
- parent_style_id = style.get('style')
- if parent_style_id:
- if parent_style_id not in styles:
- repeat = True
- continue
- styles[style_id] = styles[parent_style_id].copy()
- for prop in SUPPORTED_STYLING:
- prop_val = style.get(_x('tts:' + prop))
- if prop_val:
- styles.setdefault(style_id, {})[prop] = prop_val
- if repeat:
- repeat = False
- else:
- break
- for p in ('body', 'div'):
- ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
- if ele is None:
- continue
- style = styles.get(ele.get('style'))
- if not style:
- continue
- default_style.update(style)
- for para, index in zip(paras, itertools.count(1)):
- begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
- end_time = parse_dfxp_time_expr(para.attrib.get('end'))
- dur = parse_dfxp_time_expr(para.attrib.get('dur'))
- if begin_time is None:
- continue
- if not end_time:
- if not dur:
- continue
- end_time = begin_time + dur
- out.append('%d\n%s --> %s\n%s\n\n' % (
- index,
- srt_subtitles_timecode(begin_time),
- srt_subtitles_timecode(end_time),
- parse_node(para)))
- return ''.join(out)
- def cli_option(params, command_option, param):
- param = params.get(param)
- if param:
- param = compat_str(param)
- return [command_option, param] if param is not None else []
- def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
- param = params.get(param)
- if param is None:
- return []
- assert isinstance(param, bool)
- if separator:
- return [command_option + separator + (true_value if param else false_value)]
- return [command_option, true_value if param else false_value]
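- # Illustrative usage (editor's sketch; the option and param names here are
- # hypothetical):
- # >>> cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
- # ['--no-check-certificate', 'true']
- # >>> cli_bool_option({'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
- # ['--check-certificate=true']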
- def cli_valueless_option(params, command_option, param, expected_value=True):
- param = params.get(param)
- return [command_option] if param == expected_value else []
- def cli_configuration_args(params, param, default=[]):
- ex_args = params.get(param)
- if ex_args is None:
- return default
- assert isinstance(ex_args, list)
- return ex_args
- class ISO639Utils(object):
- # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
- _lang_map = {
- 'aa': 'aar',
- 'ab': 'abk',
- 'ae': 'ave',
- 'af': 'afr',
- 'ak': 'aka',
- 'am': 'amh',
- 'an': 'arg',
- 'ar': 'ara',
- 'as': 'asm',
- 'av': 'ava',
- 'ay': 'aym',
- 'az': 'aze',
- 'ba': 'bak',
- 'be': 'bel',
- 'bg': 'bul',
- 'bh': 'bih',
- 'bi': 'bis',
- 'bm': 'bam',
- 'bn': 'ben',
- 'bo': 'bod',
- 'br': 'bre',
- 'bs': 'bos',
- 'ca': 'cat',
- 'ce': 'che',
- 'ch': 'cha',
- 'co': 'cos',
- 'cr': 'cre',
- 'cs': 'ces',
- 'cu': 'chu',
- 'cv': 'chv',
- 'cy': 'cym',
- 'da': 'dan',
- 'de': 'deu',
- 'dv': 'div',
- 'dz': 'dzo',
- 'ee': 'ewe',
- 'el': 'ell',
- 'en': 'eng',
- 'eo': 'epo',
- 'es': 'spa',
- 'et': 'est',
- 'eu': 'eus',
- 'fa': 'fas',
- 'ff': 'ful',
- 'fi': 'fin',
- 'fj': 'fij',
- 'fo': 'fao',
- 'fr': 'fra',
- 'fy': 'fry',
- 'ga': 'gle',
- 'gd': 'gla',
- 'gl': 'glg',
- 'gn': 'grn',
- 'gu': 'guj',
- 'gv': 'glv',
- 'ha': 'hau',
- 'he': 'heb',
- 'iw': 'heb', # Replaced by he in 1989 revision
- 'hi': 'hin',
- 'ho': 'hmo',
- 'hr': 'hrv',
- 'ht': 'hat',
- 'hu': 'hun',
- 'hy': 'hye',
- 'hz': 'her',
- 'ia': 'ina',
- 'id': 'ind',
- 'in': 'ind', # Replaced by id in 1989 revision
- 'ie': 'ile',
- 'ig': 'ibo',
- 'ii': 'iii',
- 'ik': 'ipk',
- 'io': 'ido',
- 'is': 'isl',
- 'it': 'ita',
- 'iu': 'iku',
- 'ja': 'jpn',
- 'jv': 'jav',
- 'ka': 'kat',
- 'kg': 'kon',
- 'ki': 'kik',
- 'kj': 'kua',
- 'kk': 'kaz',
- 'kl': 'kal',
- 'km': 'khm',
- 'kn': 'kan',
- 'ko': 'kor',
- 'kr': 'kau',
- 'ks': 'kas',
- 'ku': 'kur',
- 'kv': 'kom',
- 'kw': 'cor',
- 'ky': 'kir',
- 'la': 'lat',
- 'lb': 'ltz',
- 'lg': 'lug',
- 'li': 'lim',
- 'ln': 'lin',
- 'lo': 'lao',
- 'lt': 'lit',
- 'lu': 'lub',
- 'lv': 'lav',
- 'mg': 'mlg',
- 'mh': 'mah',
- 'mi': 'mri',
- 'mk': 'mkd',
- 'ml': 'mal',
- 'mn': 'mon',
- 'mr': 'mar',
- 'ms': 'msa',
- 'mt': 'mlt',
- 'my': 'mya',
- 'na': 'nau',
- 'nb': 'nob',
- 'nd': 'nde',
- 'ne': 'nep',
- 'ng': 'ndo',
- 'nl': 'nld',
- 'nn': 'nno',
- 'no': 'nor',
- 'nr': 'nbl',
- 'nv': 'nav',
- 'ny': 'nya',
- 'oc': 'oci',
- 'oj': 'oji',
- 'om': 'orm',
- 'or': 'ori',
- 'os': 'oss',
- 'pa': 'pan',
- 'pi': 'pli',
- 'pl': 'pol',
- 'ps': 'pus',
- 'pt': 'por',
- 'qu': 'que',
- 'rm': 'roh',
- 'rn': 'run',
- 'ro': 'ron',
- 'ru': 'rus',
- 'rw': 'kin',
- 'sa': 'san',
- 'sc': 'srd',
- 'sd': 'snd',
- 'se': 'sme',
- 'sg': 'sag',
- 'si': 'sin',
- 'sk': 'slk',
- 'sl': 'slv',
- 'sm': 'smo',
- 'sn': 'sna',
- 'so': 'som',
- 'sq': 'sqi',
- 'sr': 'srp',
- 'ss': 'ssw',
- 'st': 'sot',
- 'su': 'sun',
- 'sv': 'swe',
- 'sw': 'swa',
- 'ta': 'tam',
- 'te': 'tel',
- 'tg': 'tgk',
- 'th': 'tha',
- 'ti': 'tir',
- 'tk': 'tuk',
- 'tl': 'tgl',
- 'tn': 'tsn',
- 'to': 'ton',
- 'tr': 'tur',
- 'ts': 'tso',
- 'tt': 'tat',
- 'tw': 'twi',
- 'ty': 'tah',
- 'ug': 'uig',
- 'uk': 'ukr',
- 'ur': 'urd',
- 'uz': 'uzb',
- 've': 'ven',
- 'vi': 'vie',
- 'vo': 'vol',
- 'wa': 'wln',
- 'wo': 'wol',
- 'xh': 'xho',
- 'yi': 'yid',
- 'ji': 'yid', # Replaced by yi in 1989 revision
- 'yo': 'yor',
- 'za': 'zha',
- 'zh': 'zho',
- 'zu': 'zul',
- }
- @classmethod
- def short2long(cls, code):
- """Convert language code from ISO 639-1 to ISO 639-2/T"""
- return cls._lang_map.get(code[:2])
- @classmethod
- def long2short(cls, code):
- """Convert language code from ISO 639-2/T to ISO 639-1"""
- for short_name, long_name in cls._lang_map.items():
- if long_name == code:
- return short_name
- class ISO3166Utils(object):
- # From http://data.okfn.org/data/core/country-list
- _country_map = {
- 'AF': 'Afghanistan',
- 'AX': 'Åland Islands',
- 'AL': 'Albania',
- 'DZ': 'Algeria',
- 'AS': 'American Samoa',
- 'AD': 'Andorra',
- 'AO': 'Angola',
- 'AI': 'Anguilla',
- 'AQ': 'Antarctica',
- 'AG': 'Antigua and Barbuda',
- 'AR': 'Argentina',
- 'AM': 'Armenia',
- 'AW': 'Aruba',
- 'AU': 'Australia',
- 'AT': 'Austria',
- 'AZ': 'Azerbaijan',
- 'BS': 'Bahamas',
- 'BH': 'Bahrain',
- 'BD': 'Bangladesh',
- 'BB': 'Barbados',
- 'BY': 'Belarus',
- 'BE': 'Belgium',
- 'BZ': 'Belize',
- 'BJ': 'Benin',
- 'BM': 'Bermuda',
- 'BT': 'Bhutan',
- 'BO': 'Bolivia, Plurinational State of',
- 'BQ': 'Bonaire, Sint Eustatius and Saba',
- 'BA': 'Bosnia and Herzegovina',
- 'BW': 'Botswana',
- 'BV': 'Bouvet Island',
- 'BR': 'Brazil',
- 'IO': 'British Indian Ocean Territory',
- 'BN': 'Brunei Darussalam',
- 'BG': 'Bulgaria',
- 'BF': 'Burkina Faso',
- 'BI': 'Burundi',
- 'KH': 'Cambodia',
- 'CM': 'Cameroon',
- 'CA': 'Canada',
- 'CV': 'Cape Verde',
- 'KY': 'Cayman Islands',
- 'CF': 'Central African Republic',
- 'TD': 'Chad',
- 'CL': 'Chile',
- 'CN': 'China',
- 'CX': 'Christmas Island',
- 'CC': 'Cocos (Keeling) Islands',
- 'CO': 'Colombia',
- 'KM': 'Comoros',
- 'CG': 'Congo',
- 'CD': 'Congo, the Democratic Republic of the',
- 'CK': 'Cook Islands',
- 'CR': 'Costa Rica',
- 'CI': 'Côte d\'Ivoire',
- 'HR': 'Croatia',
- 'CU': 'Cuba',
- 'CW': 'Curaçao',
- 'CY': 'Cyprus',
- 'CZ': 'Czech Republic',
- 'DK': 'Denmark',
- 'DJ': 'Djibouti',
- 'DM': 'Dominica',
- 'DO': 'Dominican Republic',
- 'EC': 'Ecuador',
- 'EG': 'Egypt',
- 'SV': 'El Salvador',
- 'GQ': 'Equatorial Guinea',
- 'ER': 'Eritrea',
- 'EE': 'Estonia',
- 'ET': 'Ethiopia',
- 'FK': 'Falkland Islands (Malvinas)',
- 'FO': 'Faroe Islands',
- 'FJ': 'Fiji',
- 'FI': 'Finland',
- 'FR': 'France',
- 'GF': 'French Guiana',
- 'PF': 'French Polynesia',
- 'TF': 'French Southern Territories',
- 'GA': 'Gabon',
- 'GM': 'Gambia',
- 'GE': 'Georgia',
- 'DE': 'Germany',
- 'GH': 'Ghana',
- 'GI': 'Gibraltar',
- 'GR': 'Greece',
- 'GL': 'Greenland',
- 'GD': 'Grenada',
- 'GP': 'Guadeloupe',
- 'GU': 'Guam',
- 'GT': 'Guatemala',
- 'GG': 'Guernsey',
- 'GN': 'Guinea',
- 'GW': 'Guinea-Bissau',
- 'GY': 'Guyana',
- 'HT': 'Haiti',
- 'HM': 'Heard Island and McDonald Islands',
- 'VA': 'Holy See (Vatican City State)',
- 'HN': 'Honduras',
- 'HK': 'Hong Kong',
- 'HU': 'Hungary',
- 'IS': 'Iceland',
- 'IN': 'India',
- 'ID': 'Indonesia',
- 'IR': 'Iran, Islamic Republic of',
- 'IQ': 'Iraq',
- 'IE': 'Ireland',
- 'IM': 'Isle of Man',
- 'IL': 'Israel',
- 'IT': 'Italy',
- 'JM': 'Jamaica',
- 'JP': 'Japan',
- 'JE': 'Jersey',
- 'JO': 'Jordan',
- 'KZ': 'Kazakhstan',
- 'KE': 'Kenya',
- 'KI': 'Kiribati',
- 'KP': 'Korea, Democratic People\'s Republic of',
- 'KR': 'Korea, Republic of',
- 'KW': 'Kuwait',
- 'KG': 'Kyrgyzstan',
- 'LA': 'Lao People\'s Democratic Republic',
- 'LV': 'Latvia',
- 'LB': 'Lebanon',
- 'LS': 'Lesotho',
- 'LR': 'Liberia',
- 'LY': 'Libya',
- 'LI': 'Liechtenstein',
- 'LT': 'Lithuania',
- 'LU': 'Luxembourg',
- 'MO': 'Macao',
- 'MK': 'Macedonia, the Former Yugoslav Republic of',
- 'MG': 'Madagascar',
- 'MW': 'Malawi',
- 'MY': 'Malaysia',
- 'MV': 'Maldives',
- 'ML': 'Mali',
- 'MT': 'Malta',
- 'MH': 'Marshall Islands',
- 'MQ': 'Martinique',
- 'MR': 'Mauritania',
- 'MU': 'Mauritius',
- 'YT': 'Mayotte',
- 'MX': 'Mexico',
- 'FM': 'Micronesia, Federated States of',
- 'MD': 'Moldova, Republic of',
- 'MC': 'Monaco',
- 'MN': 'Mongolia',
- 'ME': 'Montenegro',
- 'MS': 'Montserrat',
- 'MA': 'Morocco',
- 'MZ': 'Mozambique',
- 'MM': 'Myanmar',
- 'NA': 'Namibia',
- 'NR': 'Nauru',
- 'NP': 'Nepal',
- 'NL': 'Netherlands',
- 'NC': 'New Caledonia',
- 'NZ': 'New Zealand',
- 'NI': 'Nicaragua',
- 'NE': 'Niger',
- 'NG': 'Nigeria',
- 'NU': 'Niue',
- 'NF': 'Norfolk Island',
- 'MP': 'Northern Mariana Islands',
- 'NO': 'Norway',
- 'OM': 'Oman',
- 'PK': 'Pakistan',
- 'PW': 'Palau',
- 'PS': 'Palestine, State of',
- 'PA': 'Panama',
- 'PG': 'Papua New Guinea',
- 'PY': 'Paraguay',
- 'PE': 'Peru',
- 'PH': 'Philippines',
- 'PN': 'Pitcairn',
- 'PL': 'Poland',
- 'PT': 'Portugal',
- 'PR': 'Puerto Rico',
- 'QA': 'Qatar',
- 'RE': 'Réunion',
- 'RO': 'Romania',
- 'RU': 'Russian Federation',
- 'RW': 'Rwanda',
- 'BL': 'Saint Barthélemy',
- 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
- 'KN': 'Saint Kitts and Nevis',
- 'LC': 'Saint Lucia',
- 'MF': 'Saint Martin (French part)',
- 'PM': 'Saint Pierre and Miquelon',
- 'VC': 'Saint Vincent and the Grenadines',
- 'WS': 'Samoa',
- 'SM': 'San Marino',
- 'ST': 'Sao Tome and Principe',
- 'SA': 'Saudi Arabia',
- 'SN': 'Senegal',
- 'RS': 'Serbia',
- 'SC': 'Seychelles',
- 'SL': 'Sierra Leone',
- 'SG': 'Singapore',
- 'SX': 'Sint Maarten (Dutch part)',
- 'SK': 'Slovakia',
- 'SI': 'Slovenia',
- 'SB': 'Solomon Islands',
- 'SO': 'Somalia',
- 'ZA': 'South Africa',
- 'GS': 'South Georgia and the South Sandwich Islands',
- 'SS': 'South Sudan',
- 'ES': 'Spain',
- 'LK': 'Sri Lanka',
- 'SD': 'Sudan',
- 'SR': 'Suriname',
- 'SJ': 'Svalbard and Jan Mayen',
- 'SZ': 'Swaziland',
- 'SE': 'Sweden',
- 'CH': 'Switzerland',
- 'SY': 'Syrian Arab Republic',
- 'TW': 'Taiwan, Province of China',
- 'TJ': 'Tajikistan',
- 'TZ': 'Tanzania, United Republic of',
- 'TH': 'Thailand',
- 'TL': 'Timor-Leste',
- 'TG': 'Togo',
- 'TK': 'Tokelau',
- 'TO': 'Tonga',
- 'TT': 'Trinidad and Tobago',
- 'TN': 'Tunisia',
- 'TR': 'Turkey',
- 'TM': 'Turkmenistan',
- 'TC': 'Turks and Caicos Islands',
- 'TV': 'Tuvalu',
- 'UG': 'Uganda',
- 'UA': 'Ukraine',
- 'AE': 'United Arab Emirates',
- 'GB': 'United Kingdom',
- 'US': 'United States',
- 'UM': 'United States Minor Outlying Islands',
- 'UY': 'Uruguay',
- 'UZ': 'Uzbekistan',
- 'VU': 'Vanuatu',
- 'VE': 'Venezuela, Bolivarian Republic of',
- 'VN': 'Viet Nam',
- 'VG': 'Virgin Islands, British',
- 'VI': 'Virgin Islands, U.S.',
- 'WF': 'Wallis and Futuna',
- 'EH': 'Western Sahara',
- 'YE': 'Yemen',
- 'ZM': 'Zambia',
- 'ZW': 'Zimbabwe',
- }
- @classmethod
- def short2full(cls, code):
- """Convert an ISO 3166-2 country code to the corresponding full name"""
- return cls._country_map.get(code.upper())
- class GeoUtils(object):
- # Major IPv4 address blocks per country
- _country_ip_map = {
- 'AD': '46.172.224.0/19',
- 'AE': '94.200.0.0/13',
- 'AF': '149.54.0.0/17',
- 'AG': '209.59.64.0/18',
- 'AI': '204.14.248.0/21',
- 'AL': '46.99.0.0/16',
- 'AM': '46.70.0.0/15',
- 'AO': '105.168.0.0/13',
- 'AP': '182.50.184.0/21',
- 'AQ': '23.154.160.0/24',
- 'AR': '181.0.0.0/12',
- 'AS': '202.70.112.0/20',
- 'AT': '77.116.0.0/14',
- 'AU': '1.128.0.0/11',
- 'AW': '181.41.0.0/18',
- 'AX': '185.217.4.0/22',
- 'AZ': '5.197.0.0/16',
- 'BA': '31.176.128.0/17',
- 'BB': '65.48.128.0/17',
- 'BD': '114.130.0.0/16',
- 'BE': '57.0.0.0/8',
- 'BF': '102.178.0.0/15',
- 'BG': '95.42.0.0/15',
- 'BH': '37.131.0.0/17',
- 'BI': '154.117.192.0/18',
- 'BJ': '137.255.0.0/16',
- 'BL': '185.212.72.0/23',
- 'BM': '196.12.64.0/18',
- 'BN': '156.31.0.0/16',
- 'BO': '161.56.0.0/16',
- 'BQ': '161.0.80.0/20',
- 'BR': '191.128.0.0/12',
- 'BS': '24.51.64.0/18',
- 'BT': '119.2.96.0/19',
- 'BW': '168.167.0.0/16',
- 'BY': '178.120.0.0/13',
- 'BZ': '179.42.192.0/18',
- 'CA': '99.224.0.0/11',
- 'CD': '41.243.0.0/16',
- 'CF': '197.242.176.0/21',
- 'CG': '160.113.0.0/16',
- 'CH': '85.0.0.0/13',
- 'CI': '102.136.0.0/14',
- 'CK': '202.65.32.0/19',
- 'CL': '152.172.0.0/14',
- 'CM': '102.244.0.0/14',
- 'CN': '36.128.0.0/10',
- 'CO': '181.240.0.0/12',
- 'CR': '201.192.0.0/12',
- 'CU': '152.206.0.0/15',
- 'CV': '165.90.96.0/19',
- 'CW': '190.88.128.0/17',
- 'CY': '31.153.0.0/16',
- 'CZ': '88.100.0.0/14',
- 'DE': '53.0.0.0/8',
- 'DJ': '197.241.0.0/17',
- 'DK': '87.48.0.0/12',
- 'DM': '192.243.48.0/20',
- 'DO': '152.166.0.0/15',
- 'DZ': '41.96.0.0/12',
- 'EC': '186.68.0.0/15',
- 'EE': '90.190.0.0/15',
- 'EG': '156.160.0.0/11',
- 'ER': '196.200.96.0/20',
- 'ES': '88.0.0.0/11',
- 'ET': '196.188.0.0/14',
- 'EU': '2.16.0.0/13',
- 'FI': '91.152.0.0/13',
- 'FJ': '144.120.0.0/16',
- 'FK': '80.73.208.0/21',
- 'FM': '119.252.112.0/20',
- 'FO': '88.85.32.0/19',
- 'FR': '90.0.0.0/9',
- 'GA': '41.158.0.0/15',
- 'GB': '25.0.0.0/8',
- 'GD': '74.122.88.0/21',
- 'GE': '31.146.0.0/16',
- 'GF': '161.22.64.0/18',
- 'GG': '62.68.160.0/19',
- 'GH': '154.160.0.0/12',
- 'GI': '95.164.0.0/16',
- 'GL': '88.83.0.0/19',
- 'GM': '160.182.0.0/15',
- 'GN': '197.149.192.0/18',
- 'GP': '104.250.0.0/19',
- 'GQ': '105.235.224.0/20',
- 'GR': '94.64.0.0/13',
- 'GT': '168.234.0.0/16',
- 'GU': '168.123.0.0/16',
- 'GW': '197.214.80.0/20',
- 'GY': '181.41.64.0/18',
- 'HK': '113.252.0.0/14',
- 'HN': '181.210.0.0/16',
- 'HR': '93.136.0.0/13',
- 'HT': '148.102.128.0/17',
- 'HU': '84.0.0.0/14',
- 'ID': '39.192.0.0/10',
- 'IE': '87.32.0.0/12',
- 'IL': '79.176.0.0/13',
- 'IM': '5.62.80.0/20',
- 'IN': '117.192.0.0/10',
- 'IO': '203.83.48.0/21',
- 'IQ': '37.236.0.0/14',
- 'IR': '2.176.0.0/12',
- 'IS': '82.221.0.0/16',
- 'IT': '79.0.0.0/10',
- 'JE': '87.244.64.0/18',
- 'JM': '72.27.0.0/17',
- 'JO': '176.29.0.0/16',
- 'JP': '133.0.0.0/8',
- 'KE': '105.48.0.0/12',
- 'KG': '158.181.128.0/17',
- 'KH': '36.37.128.0/17',
- 'KI': '103.25.140.0/22',
- 'KM': '197.255.224.0/20',
- 'KN': '198.167.192.0/19',
- 'KP': '175.45.176.0/22',
- 'KR': '175.192.0.0/10',
- 'KW': '37.36.0.0/14',
- 'KY': '64.96.0.0/15',
- 'KZ': '2.72.0.0/13',
- 'LA': '115.84.64.0/18',
- 'LB': '178.135.0.0/16',
- 'LC': '24.92.144.0/20',
- 'LI': '82.117.0.0/19',
- 'LK': '112.134.0.0/15',
- 'LR': '102.183.0.0/16',
- 'LS': '129.232.0.0/17',
- 'LT': '78.56.0.0/13',
- 'LU': '188.42.0.0/16',
- 'LV': '46.109.0.0/16',
- 'LY': '41.252.0.0/14',
- 'MA': '105.128.0.0/11',
- 'MC': '88.209.64.0/18',
- 'MD': '37.246.0.0/16',
- 'ME': '178.175.0.0/17',
- 'MF': '74.112.232.0/21',
- 'MG': '154.126.0.0/17',
- 'MH': '117.103.88.0/21',
- 'MK': '77.28.0.0/15',
- 'ML': '154.118.128.0/18',
- 'MM': '37.111.0.0/17',
- 'MN': '49.0.128.0/17',
- 'MO': '60.246.0.0/16',
- 'MP': '202.88.64.0/20',
- 'MQ': '109.203.224.0/19',
- 'MR': '41.188.64.0/18',
- 'MS': '208.90.112.0/22',
- 'MT': '46.11.0.0/16',
- 'MU': '105.16.0.0/12',
- 'MV': '27.114.128.0/18',
- 'MW': '102.70.0.0/15',
- 'MX': '187.192.0.0/11',
- 'MY': '175.136.0.0/13',
- 'MZ': '197.218.0.0/15',
- 'NA': '41.182.0.0/16',
- 'NC': '101.101.0.0/18',
- 'NE': '197.214.0.0/18',
- 'NF': '203.17.240.0/22',
- 'NG': '105.112.0.0/12',
- 'NI': '186.76.0.0/15',
- 'NL': '145.96.0.0/11',
- 'NO': '84.208.0.0/13',
- 'NP': '36.252.0.0/15',
- 'NR': '203.98.224.0/19',
- 'NU': '49.156.48.0/22',
- 'NZ': '49.224.0.0/14',
- 'OM': '5.36.0.0/15',
- 'PA': '186.72.0.0/15',
- 'PE': '186.160.0.0/14',
- 'PF': '123.50.64.0/18',
- 'PG': '124.240.192.0/19',
- 'PH': '49.144.0.0/13',
- 'PK': '39.32.0.0/11',
- 'PL': '83.0.0.0/11',
- 'PM': '70.36.0.0/20',
- 'PR': '66.50.0.0/16',
- 'PS': '188.161.0.0/16',
- 'PT': '85.240.0.0/13',
- 'PW': '202.124.224.0/20',
- 'PY': '181.120.0.0/14',
- 'QA': '37.210.0.0/15',
- 'RE': '102.35.0.0/16',
- 'RO': '79.112.0.0/13',
- 'RS': '93.86.0.0/15',
- 'RU': '5.136.0.0/13',
- 'RW': '41.186.0.0/16',
- 'SA': '188.48.0.0/13',
- 'SB': '202.1.160.0/19',
- 'SC': '154.192.0.0/11',
- 'SD': '102.120.0.0/13',
- 'SE': '78.64.0.0/12',
- 'SG': '8.128.0.0/10',
- 'SI': '188.196.0.0/14',
- 'SK': '78.98.0.0/15',
- 'SL': '102.143.0.0/17',
- 'SM': '89.186.32.0/19',
- 'SN': '41.82.0.0/15',
- 'SO': '154.115.192.0/18',
- 'SR': '186.179.128.0/17',
- 'SS': '105.235.208.0/21',
- 'ST': '197.159.160.0/19',
- 'SV': '168.243.0.0/16',
- 'SX': '190.102.0.0/20',
- 'SY': '5.0.0.0/16',
- 'SZ': '41.84.224.0/19',
- 'TC': '65.255.48.0/20',
- 'TD': '154.68.128.0/19',
- 'TG': '196.168.0.0/14',
- 'TH': '171.96.0.0/13',
- 'TJ': '85.9.128.0/18',
- 'TK': '27.96.24.0/21',
- 'TL': '180.189.160.0/20',
- 'TM': '95.85.96.0/19',
- 'TN': '197.0.0.0/11',
- 'TO': '175.176.144.0/21',
- 'TR': '78.160.0.0/11',
- 'TT': '186.44.0.0/15',
- 'TV': '202.2.96.0/19',
- 'TW': '120.96.0.0/11',
- 'TZ': '156.156.0.0/14',
- 'UA': '37.52.0.0/14',
- 'UG': '102.80.0.0/13',
- 'US': '6.0.0.0/8',
- 'UY': '167.56.0.0/13',
- 'UZ': '84.54.64.0/18',
- 'VA': '212.77.0.0/19',
- 'VC': '207.191.240.0/21',
- 'VE': '186.88.0.0/13',
- 'VG': '66.81.192.0/20',
- 'VI': '146.226.0.0/16',
- 'VN': '14.160.0.0/11',
- 'VU': '202.80.32.0/20',
- 'WF': '117.20.32.0/21',
- 'WS': '202.4.32.0/19',
- 'YE': '134.35.0.0/16',
- 'YT': '41.242.116.0/22',
- 'ZA': '41.0.0.0/11',
- 'ZM': '102.144.0.0/13',
- 'ZW': '102.177.192.0/18',
- }
- @classmethod
- def random_ipv4(cls, code_or_block):
- if len(code_or_block) == 2:
- block = cls._country_ip_map.get(code_or_block.upper())
- if not block:
- return None
- else:
- block = code_or_block
- addr, preflen = block.split('/')
- addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
- addr_max = addr_min | (0xffffffff >> int(preflen))
- return compat_str(socket.inet_ntoa(
- compat_struct_pack('!L', random.randint(addr_min, addr_max))))
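- # Illustrative usage (editor's sketch; the result is random within the
- # country's block, so no fixed output is shown):
- # >>> GeoUtils.random_ipv4('DE')  # some address inside 53.0.0.0/8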
- class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
- def __init__(self, proxies=None):
- # Set default handlers
- for type in ('http', 'https'):
- setattr(self, '%s_open' % type,
- lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
- meth(r, proxy, type))
- compat_urllib_request.ProxyHandler.__init__(self, proxies)
- def proxy_open(self, req, proxy, type):
- req_proxy = req.headers.get('Ytdl-request-proxy')
- if req_proxy is not None:
- proxy = req_proxy
- del req.headers['Ytdl-request-proxy']
- if proxy == '__noproxy__':
- return None # No Proxy
- if compat_urllib_parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
- req.add_header('Ytdl-socks-proxy', proxy)
- # youtube-dl's http/https handlers do the actual wrapping of the socket with SOCKS
- return None
- return compat_urllib_request.ProxyHandler.proxy_open(
- self, req, proxy, type)
- # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
- # released into Public Domain
- # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
- def long_to_bytes(n, blocksize=0):
- """long_to_bytes(n:long, blocksize:int) : string
- Convert a long integer to a byte string.
- If optional blocksize is given and greater than zero, pad the front of the
- byte string with binary zeros so that the length is a multiple of
- blocksize.
- """
- # after much testing, this algorithm was deemed to be the fastest
- s = b''
- n = int(n)
- while n > 0:
- s = compat_struct_pack('>I', n & 0xffffffff) + s
- n = n >> 32
- # strip off leading zeros
- for i in range(len(s)):
- if s[i] != b'\000'[0]:
- break
- else:
- # only happens when n == 0
- s = b'\000'
- i = 0
- s = s[i:]
- # add back some pad bytes. this could be done more efficiently w.r.t. the
- # de-padding being done above, but sigh...
- if blocksize > 0 and len(s) % blocksize:
- s = (blocksize - len(s) % blocksize) * b'\000' + s
- return s
- def bytes_to_long(s):
- """bytes_to_long(string) : long
- Convert a byte string to a long integer.
- This is (essentially) the inverse of long_to_bytes().
- """
- acc = 0
- length = len(s)
- if length % 4:
- extra = (4 - length % 4)
- s = b'\000' * extra + s
- length = length + extra
- for i in range(0, length, 4):
- acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
- return acc
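- # Illustrative usage (editor's sketch): the two functions round-trip, and
- # `blocksize` left-pads with NUL bytes:
- # >>> long_to_bytes(1, 4)
- # b'\x00\x00\x00\x01'
- # >>> bytes_to_long(long_to_bytes(0xdeadbeef))
- # 3735928559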
- def ohdave_rsa_encrypt(data, exponent, modulus):
- '''
- Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
- Input:
- data: data to encrypt, bytes-like object
- exponent, modulus: parameter e and N of RSA algorithm, both integer
- Output: hex string of encrypted data
- Limitation: supports one block encryption only
- '''
- payload = int(binascii.hexlify(data[::-1]), 16)
- encrypted = pow(payload, exponent, modulus)
- return '%x' % encrypted
- def pkcs1pad(data, length):
- """
- Padding input data with PKCS#1 scheme
- @param {int[]} data input data
- @param {int} length target length
- @returns {int[]} padded data
- """
- if len(data) > length - 11:
- raise ValueError('Input data too long for PKCS#1 padding')
- # PKCS#1 v1.5 requires the padding string PS to consist of non-zero octets
- pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]
- return [0, 2] + pseudo_random + [0] + data
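- # Illustrative usage (editor's sketch): a length-16 block for a 1-byte
- # payload is [0, 2], 12 random octets, then [0, 42]:
- # >>> block = pkcs1pad([42], 16)
- # >>> block[:2], block[-2:], len(block)
- # ([0, 2], [0, 42], 16)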
- def encode_base_n(num, n, table=None):
- FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
- if not table:
- table = FULL_TABLE[:n]
- if n > len(table):
- raise ValueError('base %d exceeds table length %d' % (n, len(table)))
- if num == 0:
- return table[0]
- ret = ''
- while num:
- ret = table[num % n] + ret
- num = num // n
- return ret
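- # Illustrative usage (editor's sketch):
- # >>> encode_base_n(255, 16)
- # 'ff'
- # >>> encode_base_n(0, 2)
- # '0'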
- def decode_packed_codes(code):
- mobj = re.search(PACKED_CODES_RE, code)
- obfuscated_code, base, count, symbols = mobj.groups()
- base = int(base)
- count = int(count)
- symbols = symbols.split('|')
- symbol_table = {}
- while count:
- count -= 1
- base_n_count = encode_base_n(count, base)
- symbol_table[base_n_count] = symbols[count] or base_n_count
- return re.sub(
- r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
- obfuscated_code)
- def caesar(s, alphabet, shift):
- if shift == 0:
- return s
- l = len(alphabet)
- return ''.join(
- alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
- for c in s)
- def rot47(s):
- return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
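- # Illustrative usage (editor's sketch): caesar() shifts only characters that
- # occur in the given alphabet; rot47() applies it to printable ASCII:
- # >>> caesar('abc!', 'abcdefghijklmnopqrstuvwxyz', 2)
- # 'cde!'
- # >>> rot47('ytdl')
- # 'JE5='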
- def parse_m3u8_attributes(attrib):
- info = {}
- for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
- if val.startswith('"'):
- val = val[1:-1]
- info[key] = val
- return info
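- # Illustrative usage (editor's sketch): quoted values keep embedded commas:
- # >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.4d401e"')
- # {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.4d401e'}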
- def urshift(val, n):
- return val >> n if val >= 0 else (val + 0x100000000) >> n
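- # Illustrative usage (editor's sketch): an unsigned 32-bit right shift, like
- # JavaScript's '>>>' operator:
- # >>> urshift(-1, 28)
- # 15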
- # Based on png2str() written by @gdkchan and improved by @yokrysty
- # Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
- def decode_png(png_data):
- # Reference: https://www.w3.org/TR/PNG/
- header = png_data[8:]
- if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
- raise IOError('Not a valid PNG file.')
- int_map = {1: '>B', 2: '>H', 4: '>I'}
- unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
- chunks = []
- while header:
- length = unpack_integer(header[:4])
- header = header[4:]
- chunk_type = header[:4]
- header = header[4:]
- chunk_data = header[:length]
- header = header[length:]
- header = header[4:] # Skip CRC
- chunks.append({
- 'type': chunk_type,
- 'length': length,
- 'data': chunk_data
- })
- ihdr = chunks[0]['data']
- width = unpack_integer(ihdr[:4])
- height = unpack_integer(ihdr[4:8])
- idat = b''
- for chunk in chunks:
- if chunk['type'] == b'IDAT':
- idat += chunk['data']
- if not idat:
- raise IOError('Unable to read PNG data.')
- decompressed_data = bytearray(zlib.decompress(idat))
- stride = width * 3
- pixels = []
- def _get_pixel(idx):
- x = idx % stride
- y = idx // stride
- return pixels[y][x]
- for y in range(height):
- basePos = y * (1 + stride)
- filter_type = decompressed_data[basePos]
- current_row = []
- pixels.append(current_row)
- for x in range(stride):
- color = decompressed_data[1 + basePos + x]
- basex = y * stride + x
- left = 0
- up = 0
- if x > 2:
- left = _get_pixel(basex - 3)
- if y > 0:
- up = _get_pixel(basex - stride)
- if filter_type == 1: # Sub
- color = (color + left) & 0xff
- elif filter_type == 2: # Up
- color = (color + up) & 0xff
- elif filter_type == 3: # Average
- color = (color + ((left + up) >> 1)) & 0xff
- elif filter_type == 4: # Paeth
- a = left
- b = up
- c = 0
- if x > 2 and y > 0:
- c = _get_pixel(basex - stride - 3)
- p = a + b - c
- pa = abs(p - a)
- pb = abs(p - b)
- pc = abs(p - c)
- if pa <= pb and pa <= pc:
- color = (color + a) & 0xff
- elif pb <= pc:
- color = (color + b) & 0xff
- else:
- color = (color + c) & 0xff
- current_row.append(color)
- return width, height, pixels
- def write_xattr(path, key, value):
- # This mess below finds the best xattr tool for the job
- try:
- # try the pyxattr module...
- import xattr
- if hasattr(xattr, 'set'): # pyxattr
- # Unicode arguments are not supported in python-pyxattr until
- # version 0.5.0
- # See https://github.com/ytdl-org/youtube-dl/issues/5498
- pyxattr_required_version = '0.5.0'
- if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
- # TODO: fallback to CLI tools
- raise XAttrUnavailableError(
- 'python-pyxattr is detected but is too old. '
- 'youtube-dl requires %s or above while your version is %s. '
- 'Falling back to other xattr implementations' % (
- pyxattr_required_version, xattr.__version__))
- setxattr = xattr.set
- else: # xattr
- setxattr = xattr.setxattr
- try:
- setxattr(path, key, value)
- except EnvironmentError as e:
- raise XAttrMetadataError(e.errno, e.strerror)
- except ImportError:
- if compat_os_name == 'nt':
- # Write xattrs to NTFS Alternate Data Streams:
- # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
- assert ':' not in key
- assert os.path.exists(path)
- ads_fn = path + ':' + key
- try:
- with open(ads_fn, 'wb') as f:
- f.write(value)
- except EnvironmentError as e:
- raise XAttrMetadataError(e.errno, e.strerror)
- else:
- user_has_setfattr = check_executable('setfattr', ['--version'])
- user_has_xattr = check_executable('xattr', ['-h'])
- if user_has_setfattr or user_has_xattr:
- value = value.decode('utf-8')
- if user_has_setfattr:
- executable = 'setfattr'
- opts = ['-n', key, '-v', value]
- elif user_has_xattr:
- executable = 'xattr'
- opts = ['-w', key, value]
- cmd = ([encodeFilename(executable, True)]
- + [encodeArgument(o) for o in opts]
- + [encodeFilename(path, True)])
- try:
- p = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
- except EnvironmentError as e:
- raise XAttrMetadataError(e.errno, e.strerror)
- stdout, stderr = process_communicate_or_kill(p)
- stderr = stderr.decode('utf-8', 'replace')
- if p.returncode != 0:
- raise XAttrMetadataError(p.returncode, stderr)
- else:
- # On Unix, but we can't find pyxattr, setfattr, or xattr.
- if sys.platform.startswith('linux'):
- raise XAttrUnavailableError(
- "Couldn't find a tool to set the xattrs. "
- "Install either the python 'pyxattr' or 'xattr' "
- "modules, or the GNU 'attr' package "
- "(which contains the 'setfattr' tool).")
- else:
- raise XAttrUnavailableError(
- "Couldn't find a tool to set the xattrs. "
- "Install either the python 'xattr' module, "
- "or the 'xattr' binary.")
- def random_birthday(year_field, month_field, day_field):
- start_date = datetime.date(1950, 1, 1)
- end_date = datetime.date(1995, 12, 31)
- offset = random.randint(0, (end_date - start_date).days)
- random_date = start_date + datetime.timedelta(offset)
- return {
- year_field: str(random_date.year),
- month_field: str(random_date.month),
- day_field: str(random_date.day),
- }
- def clean_podcast_url(url):
- return re.sub(r'''(?x)
- (?:
- (?:
- chtbl\.com/track|
- media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
- play\.podtrac\.com
- )/[^/]+|
- (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
- flex\.acast\.com|
- pd(?:
- cn\.co| # https://podcorn.com/analytics-prefix/
- st\.fm # https://podsights.com/docs/
- )/e
- )/''', '', url)
- if __debug__:
- # Raise TypeError if args can't be bound
- # needs compat owing to unstable inspect API, thanks PSF :-(
- try:
- inspect.signature
- def _try_bind_args(fn, *args, **kwargs):
- inspect.signature(fn).bind(*args, **kwargs)
- except AttributeError:
- # Py < 3.3
- def _try_bind_args(fn, *args, **kwargs):
- fn_args = inspect.getargspec(fn)
- # ArgSpec(args, varargs, keywords, defaults) on both Py2 and Py3
- if not fn_args.keywords:
- for k in kwargs:
- if k not in (fn_args.args or []):
- raise TypeError("got an unexpected keyword argument: '{0}'".format(k))
- if not fn_args.varargs:
- args_to_bind = len(args)
- bindable = len(fn_args.args or [])
- if args_to_bind > bindable:
- raise TypeError('too many positional arguments')
- bindable -= len(fn_args.defaults or [])
- if args_to_bind < bindable:
- if kwargs:
- bindable -= len(set(fn_args.args or []) & set(kwargs))
- if bindable > args_to_bind:
- raise TypeError("missing a required argument: '{0}'".format(fn_args.args[args_to_bind]))
- def traverse_obj(obj, *paths, **kwargs):
- """
- Safely traverse nested `dict`s and `Iterable`s
- >>> obj = [{}, {"key": "value"}]
- >>> traverse_obj(obj, (1, "key"))
- "value"
- Each of the provided `paths` is tested and the first producing a valid result will be returned.
- The next path will also be tested if the path branched but no results could be found.
- Supported values for traversal are `Mapping`, `Iterable` and `re.Match`.
- Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded.
- The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
- The keys in the path can be one of:
- - `None`: Return the current object.
- - `set`: Requires the only item in the set to be a type or function,
- like `{type}`/`{func}`. If a `type`, returns only values
- of this type. If a function, returns `func(obj)`.
- - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
- - `slice`: Branch out and return all values in `obj[key]`.
- - `Ellipsis`: Branch out and return a list of all values.
- - `tuple`/`list`: Branch out and return a list of all matching values.
- Read as: `[traverse_obj(obj, branch) for branch in branches]`.
- - `function`: Branch out and return values filtered by the function.
- Read as: `[value for key, value in obj if function(key, value)]`.
- For `Sequence`s, `key` is the index of the value.
- For `Iterable`s, `key` is the enumeration count of the value.
- For `re.Match`es, `key` is the group number (0 = full match)
- as well as additionally any group names, if given.
- - `dict`: Transform the current object and return a matching dict.
- Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
- `tuple`, `list`, and `dict` all support nested paths and branches.
- @params paths Paths which to traverse by.
- Keyword arguments:
- @param default Value to return if the paths do not match.
- If the last key in the path is a `dict`, it will apply to each value inside
- the dict instead, depth first. Try to avoid if using nested `dict` keys.
- @param expected_type If a `type`, only accept final values of this type.
- If any other callable, try to call the function on each result.
- If the last key in the path is a `dict`, it will apply to each value inside
- the dict instead, recursively. This does respect branching paths.
- @param get_all If `False`, return the first matching result, otherwise all matching ones.
- @param casesense If `False`, consider string dictionary keys as case insensitive.
- The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API
- @param _is_user_input Whether the keys are generated from user input.
- If `True` strings get converted to `int`/`slice` if needed.
- @param _traverse_string Whether to traverse into objects as strings.
- If `True`, any non-compatible object will first be
- converted into a string and then traversed into.
- The return value of that path will be a string instead,
- not respecting any further branching.
- @returns The result of the object traversal.
- If successful, `get_all=True`, and the path branches at least once,
- then a list of results is returned instead.
- A list is always returned if the last path branches and no `default` is given.
- If a path ends on a `dict` that result will always be a `dict`.
- """
- # parameter defaults
- default = kwargs.get('default', NO_DEFAULT)
- expected_type = kwargs.get('expected_type')
- get_all = kwargs.get('get_all', True)
- casesense = kwargs.get('casesense', True)
- _is_user_input = kwargs.get('_is_user_input', False)
- _traverse_string = kwargs.get('_traverse_string', False)
- # instant compat
- str = compat_str
- casefold = lambda k: compat_casefold(k) if isinstance(k, str) else k
- if isinstance(expected_type, type):
- type_test = lambda val: val if isinstance(val, expected_type) else None
- else:
- type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,))
- def lookup_or_none(v, k, getter=None):
- try:
- return getter(v, k) if getter else v[k]
- except IndexError:
- return None
- def from_iterable(iterables):
- # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
- for it in iterables:
- for item in it:
- yield item
- def apply_key(key, obj, is_last):
- branching = False
- if obj is None and _traverse_string:
- if key is Ellipsis or callable(key) or isinstance(key, slice):
- branching = True
- result = ()
- else:
- result = None
- elif key is None:
- result = obj
- elif isinstance(key, set):
- assert len(key) == 1, 'Set should only be used to wrap a single item'
- item = next(iter(key))
- if isinstance(item, type):
- result = obj if isinstance(obj, item) else None
- else:
- result = try_call(item, args=(obj,))
- elif isinstance(key, (list, tuple)):
- branching = True
- result = from_iterable(
- apply_path(obj, branch, is_last)[0] for branch in key)
- elif key is Ellipsis:
- branching = True
- if isinstance(obj, compat_collections_abc.Mapping):
- result = obj.values()
- elif is_iterable_like(obj):
- result = obj
- elif isinstance(obj, compat_re_Match):
- result = obj.groups()
- elif _traverse_string:
- branching = False
- result = str(obj)
- else:
- result = ()
- elif callable(key):
- branching = True
- if isinstance(obj, compat_collections_abc.Mapping):
- iter_obj = obj.items()
- elif is_iterable_like(obj):
- iter_obj = enumerate(obj)
- elif isinstance(obj, compat_re_Match):
- iter_obj = itertools.chain(
- enumerate(itertools.chain((obj.group(),), obj.groups())),
- obj.groupdict().items())
- elif _traverse_string:
- branching = False
- iter_obj = enumerate(str(obj))
- else:
- iter_obj = ()
- result = (v for k, v in iter_obj if try_call(key, args=(k, v)))
- if not branching: # string traversal
- result = ''.join(result)
- elif isinstance(key, dict):
- iter_obj = ((k, _traverse_obj(obj, v, False, is_last)) for k, v in key.items())
- result = dict((k, v if v is not None else default) for k, v in iter_obj
- if v is not None or default is not NO_DEFAULT) or None
- elif isinstance(obj, compat_collections_abc.Mapping):
- result = (try_call(obj.get, args=(key,))
- if casesense or try_call(obj.__contains__, args=(key,))
- else next((v for k, v in obj.items() if casefold(k) == key), None))
- elif isinstance(obj, compat_re_Match):
- result = None
- if isinstance(key, int) or casesense:
- # Py 2.6 doesn't have methods in the Match class/type
- result = lookup_or_none(obj, key, getter=lambda _, k: obj.group(k))
- elif isinstance(key, str):
- result = next((v for k, v in obj.groupdict().items()
- if casefold(k) == key), None)
- else:
- result = None
- if isinstance(key, (int, slice)):
- if is_iterable_like(obj, compat_collections_abc.Sequence):
- branching = isinstance(key, slice)
- result = lookup_or_none(obj, key)
- elif _traverse_string:
- result = lookup_or_none(str(obj), key)
- return branching, result if branching else (result,)
- def lazy_last(iterable):
- iterator = iter(iterable)
- prev = next(iterator, NO_DEFAULT)
- if prev is NO_DEFAULT:
- return
- for item in iterator:
- yield False, prev
- prev = item
- yield True, prev
- def apply_path(start_obj, path, test_type):
- objs = (start_obj,)
- has_branched = False
- key = None
- for last, key in lazy_last(variadic(path, (str, bytes, dict, set))):
- if _is_user_input and isinstance(key, str):
- if key == ':':
- key = Ellipsis
- elif ':' in key:
- key = slice(*map(int_or_none, key.split(':')))
- elif int_or_none(key) is not None:
- key = int(key)
- if not casesense and isinstance(key, str):
- key = compat_casefold(key)
- if __debug__ and callable(key):
- # Verify function signature
- _try_bind_args(key, None, None)
- new_objs = []
- for obj in objs:
- branching, results = apply_key(key, obj, last)
- has_branched |= branching
- new_objs.append(results)
- objs = from_iterable(new_objs)
- if test_type and not isinstance(key, (dict, list, tuple)):
- objs = map(type_test, objs)
- return objs, has_branched, isinstance(key, dict)
- def _traverse_obj(obj, path, allow_empty, test_type):
- results, has_branched, is_dict = apply_path(obj, path, test_type)
- results = LazyList(x for x in results if x not in (None, {}))
- if get_all and has_branched:
- if results:
- return results.exhaust()
- if allow_empty:
- return [] if default is NO_DEFAULT else default
- return None
- return results[0] if results else {} if allow_empty and is_dict else None
- for index, path in enumerate(paths, 1):
- result = _traverse_obj(obj, path, index == len(paths), True)
- if result is not None:
- return result
- return None if default is NO_DEFAULT else default
- def T(x):
- """ For use in yt-dl instead of {type} or set((type,)) """
- return set((x,))
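- # Illustrative usage (editor's sketch): T(f) marks f as a transform inside a
- # traverse_obj() path, here with the module's int_or_none helper:
- # >>> traverse_obj({'dur': '177'}, ('dur', T(int_or_none)))
- # 177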
- def get_first(obj, keys, **kwargs):
- return traverse_obj(obj, (Ellipsis,) + tuple(variadic(keys)), get_all=False, **kwargs)
- def join_nonempty(*values, **kwargs):
- # parameter defaults
- delim = kwargs.get('delim', '-')
- from_dict = kwargs.get('from_dict')
- if from_dict is not None:
- values = (traverse_obj(from_dict, variadic(v)) for v in values)
- return delim.join(map(compat_str, filter(None, values)))
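- # Illustrative usage (editor's sketch): falsy values are dropped before join:
- # >>> join_nonempty('www', 'example', 'com', delim='.')
- # 'www.example.com'
- # >>> join_nonempty('a', None, '', 'b')
- # 'a-b'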