logo

oasis-root

Compiled tree of Oasis Linux, based on our own branch at <https://hacktivis.me/git/oasis/>. Clone with: git clone https://anongit.hacktivis.me/git/oasis-root.git

zipfile.py (88531B)


  1. """
  2. Read and write ZIP files.
  3. XXX references to utf-8 need further investigation.
  4. """
  5. import binascii
  6. import importlib.util
  7. import io
  8. import itertools
  9. import os
  10. import posixpath
  11. import shutil
  12. import stat
  13. import struct
  14. import sys
  15. import threading
  16. import time
  17. import contextlib
  18. import pathlib
  19. try:
  20. import zlib # We may need its compression method
  21. crc32 = zlib.crc32
  22. except ImportError:
  23. zlib = None
  24. crc32 = binascii.crc32
  25. try:
  26. import bz2 # We may need its compression method
  27. except ImportError:
  28. bz2 = None
  29. try:
  30. import lzma # We may need its compression method
  31. except ImportError:
  32. lzma = None
  33. __all__ = ["BadZipFile", "BadZipfile", "error",
  34. "ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
  35. "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile",
  36. "Path"]
  37. class BadZipFile(Exception):
  38. pass
  39. class LargeZipFile(Exception):
  40. """
  41. Raised when writing a zipfile, the zipfile requires ZIP64 extensions
  42. and those extensions are disabled.
  43. """
  44. error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
  45. ZIP64_LIMIT = (1 << 31) - 1
  46. ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
  47. ZIP_MAX_COMMENT = (1 << 16) - 1
  48. # constants for Zip file compression methods
  49. ZIP_STORED = 0
  50. ZIP_DEFLATED = 8
  51. ZIP_BZIP2 = 12
  52. ZIP_LZMA = 14
  53. # Other ZIP compression methods not supported
  54. DEFAULT_VERSION = 20
  55. ZIP64_VERSION = 45
  56. BZIP2_VERSION = 46
  57. LZMA_VERSION = 63
  58. # we recognize (but not necessarily support) all features up to that version
  59. MAX_EXTRACT_VERSION = 63
  60. # Below are some formats and associated data for reading/writing headers using
  61. # the struct module. The names and structures of headers/records are those used
  62. # in the PKWARE description of the ZIP file format:
  63. # http://www.pkware.com/documents/casestudies/APPNOTE.TXT
  64. # (URL valid as of January 2008)
  65. # The "end of central directory" structure, magic number, size, and indices
  66. # (section V.I in the format document)
  67. structEndArchive = b"<4s4H2LH"
  68. stringEndArchive = b"PK\005\006"
  69. sizeEndCentDir = struct.calcsize(structEndArchive)
  70. _ECD_SIGNATURE = 0
  71. _ECD_DISK_NUMBER = 1
  72. _ECD_DISK_START = 2
  73. _ECD_ENTRIES_THIS_DISK = 3
  74. _ECD_ENTRIES_TOTAL = 4
  75. _ECD_SIZE = 5
  76. _ECD_OFFSET = 6
  77. _ECD_COMMENT_SIZE = 7
  78. # These last two indices are not part of the structure as defined in the
  79. # spec, but they are used internally by this module as a convenience
  80. _ECD_COMMENT = 8
  81. _ECD_LOCATION = 9
  82. # The "central directory" structure, magic number, size, and indices
  83. # of entries in the structure (section V.F in the format document)
  84. structCentralDir = "<4s4B4HL2L5H2L"
  85. stringCentralDir = b"PK\001\002"
  86. sizeCentralDir = struct.calcsize(structCentralDir)
  87. # indexes of entries in the central directory structure
  88. _CD_SIGNATURE = 0
  89. _CD_CREATE_VERSION = 1
  90. _CD_CREATE_SYSTEM = 2
  91. _CD_EXTRACT_VERSION = 3
  92. _CD_EXTRACT_SYSTEM = 4
  93. _CD_FLAG_BITS = 5
  94. _CD_COMPRESS_TYPE = 6
  95. _CD_TIME = 7
  96. _CD_DATE = 8
  97. _CD_CRC = 9
  98. _CD_COMPRESSED_SIZE = 10
  99. _CD_UNCOMPRESSED_SIZE = 11
  100. _CD_FILENAME_LENGTH = 12
  101. _CD_EXTRA_FIELD_LENGTH = 13
  102. _CD_COMMENT_LENGTH = 14
  103. _CD_DISK_NUMBER_START = 15
  104. _CD_INTERNAL_FILE_ATTRIBUTES = 16
  105. _CD_EXTERNAL_FILE_ATTRIBUTES = 17
  106. _CD_LOCAL_HEADER_OFFSET = 18
  107. # The "local file header" structure, magic number, size, and indices
  108. # (section V.A in the format document)
  109. structFileHeader = "<4s2B4HL2L2H"
  110. stringFileHeader = b"PK\003\004"
  111. sizeFileHeader = struct.calcsize(structFileHeader)
  112. _FH_SIGNATURE = 0
  113. _FH_EXTRACT_VERSION = 1
  114. _FH_EXTRACT_SYSTEM = 2
  115. _FH_GENERAL_PURPOSE_FLAG_BITS = 3
  116. _FH_COMPRESSION_METHOD = 4
  117. _FH_LAST_MOD_TIME = 5
  118. _FH_LAST_MOD_DATE = 6
  119. _FH_CRC = 7
  120. _FH_COMPRESSED_SIZE = 8
  121. _FH_UNCOMPRESSED_SIZE = 9
  122. _FH_FILENAME_LENGTH = 10
  123. _FH_EXTRA_FIELD_LENGTH = 11
  124. # The "Zip64 end of central directory locator" structure, magic number, and size
  125. structEndArchive64Locator = "<4sLQL"
  126. stringEndArchive64Locator = b"PK\x06\x07"
  127. sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
  128. # The "Zip64 end of central directory" record, magic number, size, and indices
  129. # (section V.G in the format document)
  130. structEndArchive64 = "<4sQ2H2L4Q"
  131. stringEndArchive64 = b"PK\x06\x06"
  132. sizeEndCentDir64 = struct.calcsize(structEndArchive64)
  133. _CD64_SIGNATURE = 0
  134. _CD64_DIRECTORY_RECSIZE = 1
  135. _CD64_CREATE_VERSION = 2
  136. _CD64_EXTRACT_VERSION = 3
  137. _CD64_DISK_NUMBER = 4
  138. _CD64_DISK_NUMBER_START = 5
  139. _CD64_NUMBER_ENTRIES_THIS_DISK = 6
  140. _CD64_NUMBER_ENTRIES_TOTAL = 7
  141. _CD64_DIRECTORY_SIZE = 8
  142. _CD64_OFFSET_START_CENTDIR = 9
  143. _DD_SIGNATURE = 0x08074b50
  144. _EXTRA_FIELD_STRUCT = struct.Struct('<HH')
  145. def _strip_extra(extra, xids):
  146. # Remove Extra Fields with specified IDs.
  147. unpack = _EXTRA_FIELD_STRUCT.unpack
  148. modified = False
  149. buffer = []
  150. start = i = 0
  151. while i + 4 <= len(extra):
  152. xid, xlen = unpack(extra[i : i + 4])
  153. j = i + 4 + xlen
  154. if xid in xids:
  155. if i != start:
  156. buffer.append(extra[start : i])
  157. start = j
  158. modified = True
  159. i = j
  160. if not modified:
  161. return extra
  162. return b''.join(buffer)
  163. def _check_zipfile(fp):
  164. try:
  165. if _EndRecData(fp):
  166. return True # file has correct magic number
  167. except OSError:
  168. pass
  169. return False
  170. def is_zipfile(filename):
  171. """Quickly see if a file is a ZIP file by checking the magic number.
  172. The filename argument may be a file or file-like object too.
  173. """
  174. result = False
  175. try:
  176. if hasattr(filename, "read"):
  177. result = _check_zipfile(fp=filename)
  178. else:
  179. with open(filename, "rb") as fp:
  180. result = _check_zipfile(fp)
  181. except OSError:
  182. pass
  183. return result
  184. def _EndRecData64(fpin, offset, endrec):
  185. """
  186. Read the ZIP64 end-of-archive records and use that to update endrec
  187. """
  188. try:
  189. fpin.seek(offset - sizeEndCentDir64Locator, 2)
  190. except OSError:
  191. # If the seek fails, the file is not large enough to contain a ZIP64
  192. # end-of-archive record, so just return the end record we were given.
  193. return endrec
  194. data = fpin.read(sizeEndCentDir64Locator)
  195. if len(data) != sizeEndCentDir64Locator:
  196. return endrec
  197. sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
  198. if sig != stringEndArchive64Locator:
  199. return endrec
  200. if diskno != 0 or disks > 1:
  201. raise BadZipFile("zipfiles that span multiple disks are not supported")
  202. # Assume no 'zip64 extensible data'
  203. fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
  204. data = fpin.read(sizeEndCentDir64)
  205. if len(data) != sizeEndCentDir64:
  206. return endrec
  207. sig, sz, create_version, read_version, disk_num, disk_dir, \
  208. dircount, dircount2, dirsize, diroffset = \
  209. struct.unpack(structEndArchive64, data)
  210. if sig != stringEndArchive64:
  211. return endrec
  212. # Update the original endrec using data from the ZIP64 record
  213. endrec[_ECD_SIGNATURE] = sig
  214. endrec[_ECD_DISK_NUMBER] = disk_num
  215. endrec[_ECD_DISK_START] = disk_dir
  216. endrec[_ECD_ENTRIES_THIS_DISK] = dircount
  217. endrec[_ECD_ENTRIES_TOTAL] = dircount2
  218. endrec[_ECD_SIZE] = dirsize
  219. endrec[_ECD_OFFSET] = diroffset
  220. return endrec
  221. def _EndRecData(fpin):
  222. """Return data from the "End of Central Directory" record, or None.
  223. The data is a list of the nine items in the ZIP "End of central dir"
  224. record followed by a tenth item, the file seek offset of this record."""
  225. # Determine file size
  226. fpin.seek(0, 2)
  227. filesize = fpin.tell()
  228. # Check to see if this is ZIP file with no archive comment (the
  229. # "end of central directory" structure should be the last item in the
  230. # file if this is the case).
  231. try:
  232. fpin.seek(-sizeEndCentDir, 2)
  233. except OSError:
  234. return None
  235. data = fpin.read()
  236. if (len(data) == sizeEndCentDir and
  237. data[0:4] == stringEndArchive and
  238. data[-2:] == b"\000\000"):
  239. # the signature is correct and there's no comment, unpack structure
  240. endrec = struct.unpack(structEndArchive, data)
  241. endrec=list(endrec)
  242. # Append a blank comment and record start offset
  243. endrec.append(b"")
  244. endrec.append(filesize - sizeEndCentDir)
  245. # Try to read the "Zip64 end of central directory" structure
  246. return _EndRecData64(fpin, -sizeEndCentDir, endrec)
  247. # Either this is not a ZIP file, or it is a ZIP file with an archive
  248. # comment. Search the end of the file for the "end of central directory"
  249. # record signature. The comment is the last item in the ZIP file and may be
  250. # up to 64K long. It is assumed that the "end of central directory" magic
  251. # number does not appear in the comment.
  252. maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
  253. fpin.seek(maxCommentStart, 0)
  254. data = fpin.read()
  255. start = data.rfind(stringEndArchive)
  256. if start >= 0:
  257. # found the magic number; attempt to unpack and interpret
  258. recData = data[start:start+sizeEndCentDir]
  259. if len(recData) != sizeEndCentDir:
  260. # Zip file is corrupted.
  261. return None
  262. endrec = list(struct.unpack(structEndArchive, recData))
  263. commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
  264. comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
  265. endrec.append(comment)
  266. endrec.append(maxCommentStart + start)
  267. # Try to read the "Zip64 end of central directory" structure
  268. return _EndRecData64(fpin, maxCommentStart + start - filesize,
  269. endrec)
  270. # Unable to find a valid end of central directory structure
  271. return None
  272. class ZipInfo (object):
  273. """Class with attributes describing each file in the ZIP archive."""
  274. __slots__ = (
  275. 'orig_filename',
  276. 'filename',
  277. 'date_time',
  278. 'compress_type',
  279. '_compresslevel',
  280. 'comment',
  281. 'extra',
  282. 'create_system',
  283. 'create_version',
  284. 'extract_version',
  285. 'reserved',
  286. 'flag_bits',
  287. 'volume',
  288. 'internal_attr',
  289. 'external_attr',
  290. 'header_offset',
  291. 'CRC',
  292. 'compress_size',
  293. 'file_size',
  294. '_raw_time',
  295. )
  296. def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
  297. self.orig_filename = filename # Original file name in archive
  298. # Terminate the file name at the first null byte. Null bytes in file
  299. # names are used as tricks by viruses in archives.
  300. null_byte = filename.find(chr(0))
  301. if null_byte >= 0:
  302. filename = filename[0:null_byte]
  303. # This is used to ensure paths in generated ZIP files always use
  304. # forward slashes as the directory separator, as required by the
  305. # ZIP format specification.
  306. if os.sep != "/" and os.sep in filename:
  307. filename = filename.replace(os.sep, "/")
  308. self.filename = filename # Normalized file name
  309. self.date_time = date_time # year, month, day, hour, min, sec
  310. if date_time[0] < 1980:
  311. raise ValueError('ZIP does not support timestamps before 1980')
  312. # Standard values:
  313. self.compress_type = ZIP_STORED # Type of compression for the file
  314. self._compresslevel = None # Level for the compressor
  315. self.comment = b"" # Comment for each file
  316. self.extra = b"" # ZIP extra data
  317. if sys.platform == 'win32':
  318. self.create_system = 0 # System which created ZIP archive
  319. else:
  320. # Assume everything else is unix-y
  321. self.create_system = 3 # System which created ZIP archive
  322. self.create_version = DEFAULT_VERSION # Version which created ZIP archive
  323. self.extract_version = DEFAULT_VERSION # Version needed to extract archive
  324. self.reserved = 0 # Must be zero
  325. self.flag_bits = 0 # ZIP flag bits
  326. self.volume = 0 # Volume number of file header
  327. self.internal_attr = 0 # Internal attributes
  328. self.external_attr = 0 # External file attributes
  329. self.compress_size = 0 # Size of the compressed file
  330. self.file_size = 0 # Size of the uncompressed file
  331. # Other attributes are set by class ZipFile:
  332. # header_offset Byte offset to the file header
  333. # CRC CRC-32 of the uncompressed file
  334. def __repr__(self):
  335. result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
  336. if self.compress_type != ZIP_STORED:
  337. result.append(' compress_type=%s' %
  338. compressor_names.get(self.compress_type,
  339. self.compress_type))
  340. hi = self.external_attr >> 16
  341. lo = self.external_attr & 0xFFFF
  342. if hi:
  343. result.append(' filemode=%r' % stat.filemode(hi))
  344. if lo:
  345. result.append(' external_attr=%#x' % lo)
  346. isdir = self.is_dir()
  347. if not isdir or self.file_size:
  348. result.append(' file_size=%r' % self.file_size)
  349. if ((not isdir or self.compress_size) and
  350. (self.compress_type != ZIP_STORED or
  351. self.file_size != self.compress_size)):
  352. result.append(' compress_size=%r' % self.compress_size)
  353. result.append('>')
  354. return ''.join(result)
  355. def FileHeader(self, zip64=None):
  356. """Return the per-file header as a bytes object."""
  357. dt = self.date_time
  358. dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
  359. dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
  360. if self.flag_bits & 0x08:
  361. # Set these to zero because we write them after the file data
  362. CRC = compress_size = file_size = 0
  363. else:
  364. CRC = self.CRC
  365. compress_size = self.compress_size
  366. file_size = self.file_size
  367. extra = self.extra
  368. min_version = 0
  369. if zip64 is None:
  370. zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
  371. if zip64:
  372. fmt = '<HHQQ'
  373. extra = extra + struct.pack(fmt,
  374. 1, struct.calcsize(fmt)-4, file_size, compress_size)
  375. if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
  376. if not zip64:
  377. raise LargeZipFile("Filesize would require ZIP64 extensions")
  378. # File is larger than what fits into a 4 byte integer,
  379. # fall back to the ZIP64 extension
  380. file_size = 0xffffffff
  381. compress_size = 0xffffffff
  382. min_version = ZIP64_VERSION
  383. if self.compress_type == ZIP_BZIP2:
  384. min_version = max(BZIP2_VERSION, min_version)
  385. elif self.compress_type == ZIP_LZMA:
  386. min_version = max(LZMA_VERSION, min_version)
  387. self.extract_version = max(min_version, self.extract_version)
  388. self.create_version = max(min_version, self.create_version)
  389. filename, flag_bits = self._encodeFilenameFlags()
  390. header = struct.pack(structFileHeader, stringFileHeader,
  391. self.extract_version, self.reserved, flag_bits,
  392. self.compress_type, dostime, dosdate, CRC,
  393. compress_size, file_size,
  394. len(filename), len(extra))
  395. return header + filename + extra
  396. def _encodeFilenameFlags(self):
  397. try:
  398. return self.filename.encode('ascii'), self.flag_bits
  399. except UnicodeEncodeError:
  400. return self.filename.encode('utf-8'), self.flag_bits | 0x800
  401. def _decodeExtra(self):
  402. # Try to decode the extra field.
  403. extra = self.extra
  404. unpack = struct.unpack
  405. while len(extra) >= 4:
  406. tp, ln = unpack('<HH', extra[:4])
  407. if ln+4 > len(extra):
  408. raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
  409. if tp == 0x0001:
  410. data = extra[4:ln+4]
  411. # ZIP64 extension (large files and/or large archives)
  412. try:
  413. if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF):
  414. field = "File size"
  415. self.file_size, = unpack('<Q', data[:8])
  416. data = data[8:]
  417. if self.compress_size == 0xFFFF_FFFF:
  418. field = "Compress size"
  419. self.compress_size, = unpack('<Q', data[:8])
  420. data = data[8:]
  421. if self.header_offset == 0xFFFF_FFFF:
  422. field = "Header offset"
  423. self.header_offset, = unpack('<Q', data[:8])
  424. except struct.error:
  425. raise BadZipFile(f"Corrupt zip64 extra field. "
  426. f"{field} not found.") from None
  427. extra = extra[ln+4:]
  428. @classmethod
  429. def from_file(cls, filename, arcname=None, *, strict_timestamps=True):
  430. """Construct an appropriate ZipInfo for a file on the filesystem.
  431. filename should be the path to a file or directory on the filesystem.
  432. arcname is the name which it will have within the archive (by default,
  433. this will be the same as filename, but without a drive letter and with
  434. leading path separators removed).
  435. """
  436. if isinstance(filename, os.PathLike):
  437. filename = os.fspath(filename)
  438. st = os.stat(filename)
  439. isdir = stat.S_ISDIR(st.st_mode)
  440. mtime = time.localtime(st.st_mtime)
  441. date_time = mtime[0:6]
  442. if not strict_timestamps and date_time[0] < 1980:
  443. date_time = (1980, 1, 1, 0, 0, 0)
  444. elif not strict_timestamps and date_time[0] > 2107:
  445. date_time = (2107, 12, 31, 23, 59, 59)
  446. # Create ZipInfo instance to store file information
  447. if arcname is None:
  448. arcname = filename
  449. arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
  450. while arcname[0] in (os.sep, os.altsep):
  451. arcname = arcname[1:]
  452. if isdir:
  453. arcname += '/'
  454. zinfo = cls(arcname, date_time)
  455. zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
  456. if isdir:
  457. zinfo.file_size = 0
  458. zinfo.external_attr |= 0x10 # MS-DOS directory flag
  459. else:
  460. zinfo.file_size = st.st_size
  461. return zinfo
  462. def is_dir(self):
  463. """Return True if this archive member is a directory."""
  464. return self.filename[-1] == '/'
  465. # ZIP encryption uses the CRC32 one-byte primitive for scrambling some
  466. # internal keys. We noticed that a direct implementation is faster than
  467. # relying on binascii.crc32().
  468. _crctable = None
  469. def _gen_crc(crc):
  470. for j in range(8):
  471. if crc & 1:
  472. crc = (crc >> 1) ^ 0xEDB88320
  473. else:
  474. crc >>= 1
  475. return crc
  476. # ZIP supports a password-based form of encryption. Even though known
  477. # plaintext attacks have been found against it, it is still useful
  478. # to be able to get data out of such a file.
  479. #
  480. # Usage:
  481. # zd = _ZipDecrypter(mypwd)
  482. # plain_bytes = zd(cypher_bytes)
  483. def _ZipDecrypter(pwd):
  484. key0 = 305419896
  485. key1 = 591751049
  486. key2 = 878082192
  487. global _crctable
  488. if _crctable is None:
  489. _crctable = list(map(_gen_crc, range(256)))
  490. crctable = _crctable
  491. def crc32(ch, crc):
  492. """Compute the CRC32 primitive on one byte."""
  493. return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF]
  494. def update_keys(c):
  495. nonlocal key0, key1, key2
  496. key0 = crc32(c, key0)
  497. key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF
  498. key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF
  499. key2 = crc32(key1 >> 24, key2)
  500. for p in pwd:
  501. update_keys(p)
  502. def decrypter(data):
  503. """Decrypt a bytes object."""
  504. result = bytearray()
  505. append = result.append
  506. for c in data:
  507. k = key2 | 2
  508. c ^= ((k * (k^1)) >> 8) & 0xFF
  509. update_keys(c)
  510. append(c)
  511. return bytes(result)
  512. return decrypter
  513. class LZMACompressor:
  514. def __init__(self):
  515. self._comp = None
  516. def _init(self):
  517. props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
  518. self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
  519. lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
  520. ])
  521. return struct.pack('<BBH', 9, 4, len(props)) + props
  522. def compress(self, data):
  523. if self._comp is None:
  524. return self._init() + self._comp.compress(data)
  525. return self._comp.compress(data)
  526. def flush(self):
  527. if self._comp is None:
  528. return self._init() + self._comp.flush()
  529. return self._comp.flush()
  530. class LZMADecompressor:
  531. def __init__(self):
  532. self._decomp = None
  533. self._unconsumed = b''
  534. self.eof = False
  535. def decompress(self, data):
  536. if self._decomp is None:
  537. self._unconsumed += data
  538. if len(self._unconsumed) <= 4:
  539. return b''
  540. psize, = struct.unpack('<H', self._unconsumed[2:4])
  541. if len(self._unconsumed) <= 4 + psize:
  542. return b''
  543. self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
  544. lzma._decode_filter_properties(lzma.FILTER_LZMA1,
  545. self._unconsumed[4:4 + psize])
  546. ])
  547. data = self._unconsumed[4 + psize:]
  548. del self._unconsumed
  549. result = self._decomp.decompress(data)
  550. self.eof = self._decomp.eof
  551. return result
  552. compressor_names = {
  553. 0: 'store',
  554. 1: 'shrink',
  555. 2: 'reduce',
  556. 3: 'reduce',
  557. 4: 'reduce',
  558. 5: 'reduce',
  559. 6: 'implode',
  560. 7: 'tokenize',
  561. 8: 'deflate',
  562. 9: 'deflate64',
  563. 10: 'implode',
  564. 12: 'bzip2',
  565. 14: 'lzma',
  566. 18: 'terse',
  567. 19: 'lz77',
  568. 97: 'wavpack',
  569. 98: 'ppmd',
  570. }
  571. def _check_compression(compression):
  572. if compression == ZIP_STORED:
  573. pass
  574. elif compression == ZIP_DEFLATED:
  575. if not zlib:
  576. raise RuntimeError(
  577. "Compression requires the (missing) zlib module")
  578. elif compression == ZIP_BZIP2:
  579. if not bz2:
  580. raise RuntimeError(
  581. "Compression requires the (missing) bz2 module")
  582. elif compression == ZIP_LZMA:
  583. if not lzma:
  584. raise RuntimeError(
  585. "Compression requires the (missing) lzma module")
  586. else:
  587. raise NotImplementedError("That compression method is not supported")
  588. def _get_compressor(compress_type, compresslevel=None):
  589. if compress_type == ZIP_DEFLATED:
  590. if compresslevel is not None:
  591. return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
  592. return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
  593. elif compress_type == ZIP_BZIP2:
  594. if compresslevel is not None:
  595. return bz2.BZ2Compressor(compresslevel)
  596. return bz2.BZ2Compressor()
  597. # compresslevel is ignored for ZIP_LZMA
  598. elif compress_type == ZIP_LZMA:
  599. return LZMACompressor()
  600. else:
  601. return None
  602. def _get_decompressor(compress_type):
  603. _check_compression(compress_type)
  604. if compress_type == ZIP_STORED:
  605. return None
  606. elif compress_type == ZIP_DEFLATED:
  607. return zlib.decompressobj(-15)
  608. elif compress_type == ZIP_BZIP2:
  609. return bz2.BZ2Decompressor()
  610. elif compress_type == ZIP_LZMA:
  611. return LZMADecompressor()
  612. else:
  613. descr = compressor_names.get(compress_type)
  614. if descr:
  615. raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
  616. else:
  617. raise NotImplementedError("compression type %d" % (compress_type,))
  618. class _SharedFile:
  619. def __init__(self, file, pos, close, lock, writing):
  620. self._file = file
  621. self._pos = pos
  622. self._close = close
  623. self._lock = lock
  624. self._writing = writing
  625. self.seekable = file.seekable
  626. self.tell = file.tell
  627. def seek(self, offset, whence=0):
  628. with self._lock:
  629. if self._writing():
  630. raise ValueError("Can't reposition in the ZIP file while "
  631. "there is an open writing handle on it. "
  632. "Close the writing handle before trying to read.")
  633. self._file.seek(offset, whence)
  634. self._pos = self._file.tell()
  635. return self._pos
  636. def read(self, n=-1):
  637. with self._lock:
  638. if self._writing():
  639. raise ValueError("Can't read from the ZIP file while there "
  640. "is an open writing handle on it. "
  641. "Close the writing handle before trying to read.")
  642. self._file.seek(self._pos)
  643. data = self._file.read(n)
  644. self._pos = self._file.tell()
  645. return data
  646. def close(self):
  647. if self._file is not None:
  648. fileobj = self._file
  649. self._file = None
  650. self._close(fileobj)
  651. # Provide the tell method for unseekable stream
  652. class _Tellable:
  653. def __init__(self, fp):
  654. self.fp = fp
  655. self.offset = 0
  656. def write(self, data):
  657. n = self.fp.write(data)
  658. self.offset += n
  659. return n
  660. def tell(self):
  661. return self.offset
  662. def flush(self):
  663. self.fp.flush()
  664. def close(self):
  665. self.fp.close()
  666. class ZipExtFile(io.BufferedIOBase):
  667. """File-like object for reading an archive member.
  668. Is returned by ZipFile.open().
  669. """
  670. # Max size supported by decompressor.
  671. MAX_N = 1 << 31 - 1
  672. # Read from compressed files in 4k blocks.
  673. MIN_READ_SIZE = 4096
  674. # Chunk size to read during seek
  675. MAX_SEEK_READ = 1 << 24
  676. def __init__(self, fileobj, mode, zipinfo, pwd=None,
  677. close_fileobj=False):
  678. self._fileobj = fileobj
  679. self._pwd = pwd
  680. self._close_fileobj = close_fileobj
  681. self._compress_type = zipinfo.compress_type
  682. self._compress_left = zipinfo.compress_size
  683. self._left = zipinfo.file_size
  684. self._decompressor = _get_decompressor(self._compress_type)
  685. self._eof = False
  686. self._readbuffer = b''
  687. self._offset = 0
  688. self.newlines = None
  689. self.mode = mode
  690. self.name = zipinfo.filename
  691. if hasattr(zipinfo, 'CRC'):
  692. self._expected_crc = zipinfo.CRC
  693. self._running_crc = crc32(b'')
  694. else:
  695. self._expected_crc = None
  696. self._seekable = False
  697. try:
  698. if fileobj.seekable():
  699. self._orig_compress_start = fileobj.tell()
  700. self._orig_compress_size = zipinfo.compress_size
  701. self._orig_file_size = zipinfo.file_size
  702. self._orig_start_crc = self._running_crc
  703. self._seekable = True
  704. except AttributeError:
  705. pass
  706. self._decrypter = None
  707. if pwd:
  708. if zipinfo.flag_bits & 0x8:
  709. # compare against the file type from extended local headers
  710. check_byte = (zipinfo._raw_time >> 8) & 0xff
  711. else:
  712. # compare against the CRC otherwise
  713. check_byte = (zipinfo.CRC >> 24) & 0xff
  714. h = self._init_decrypter()
  715. if h != check_byte:
  716. raise RuntimeError("Bad password for file %r" % zipinfo.orig_filename)
  717. def _init_decrypter(self):
  718. self._decrypter = _ZipDecrypter(self._pwd)
  719. # The first 12 bytes in the cypher stream is an encryption header
  720. # used to strengthen the algorithm. The first 11 bytes are
  721. # completely random, while the 12th contains the MSB of the CRC,
  722. # or the MSB of the file time depending on the header type
  723. # and is used to check the correctness of the password.
  724. header = self._fileobj.read(12)
  725. self._compress_left -= 12
  726. return self._decrypter(header)[11]
  727. def __repr__(self):
  728. result = ['<%s.%s' % (self.__class__.__module__,
  729. self.__class__.__qualname__)]
  730. if not self.closed:
  731. result.append(' name=%r mode=%r' % (self.name, self.mode))
  732. if self._compress_type != ZIP_STORED:
  733. result.append(' compress_type=%s' %
  734. compressor_names.get(self._compress_type,
  735. self._compress_type))
  736. else:
  737. result.append(' [closed]')
  738. result.append('>')
  739. return ''.join(result)
  740. def readline(self, limit=-1):
  741. """Read and return a line from the stream.
  742. If limit is specified, at most limit bytes will be read.
  743. """
  744. if limit < 0:
  745. # Shortcut common case - newline found in buffer.
  746. i = self._readbuffer.find(b'\n', self._offset) + 1
  747. if i > 0:
  748. line = self._readbuffer[self._offset: i]
  749. self._offset = i
  750. return line
  751. return io.BufferedIOBase.readline(self, limit)
  752. def peek(self, n=1):
  753. """Returns buffered bytes without advancing the position."""
  754. if n > len(self._readbuffer) - self._offset:
  755. chunk = self.read(n)
  756. if len(chunk) > self._offset:
  757. self._readbuffer = chunk + self._readbuffer[self._offset:]
  758. self._offset = 0
  759. else:
  760. self._offset -= len(chunk)
  761. # Return up to 512 bytes to reduce allocation overhead for tight loops.
  762. return self._readbuffer[self._offset: self._offset + 512]
  763. def readable(self):
  764. if self.closed:
  765. raise ValueError("I/O operation on closed file.")
  766. return True
  767. def read(self, n=-1):
  768. """Read and return up to n bytes.
  769. If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
  770. """
  771. if self.closed:
  772. raise ValueError("read from closed file.")
  773. if n is None or n < 0:
  774. buf = self._readbuffer[self._offset:]
  775. self._readbuffer = b''
  776. self._offset = 0
  777. while not self._eof:
  778. buf += self._read1(self.MAX_N)
  779. return buf
  780. end = n + self._offset
  781. if end < len(self._readbuffer):
  782. buf = self._readbuffer[self._offset:end]
  783. self._offset = end
  784. return buf
  785. n = end - len(self._readbuffer)
  786. buf = self._readbuffer[self._offset:]
  787. self._readbuffer = b''
  788. self._offset = 0
  789. while n > 0 and not self._eof:
  790. data = self._read1(n)
  791. if n < len(data):
  792. self._readbuffer = data
  793. self._offset = n
  794. buf += data[:n]
  795. break
  796. buf += data
  797. n -= len(data)
  798. return buf
  799. def _update_crc(self, newdata):
  800. # Update the CRC using the given data.
  801. if self._expected_crc is None:
  802. # No need to compute the CRC if we don't have a reference value
  803. return
  804. self._running_crc = crc32(newdata, self._running_crc)
  805. # Check the CRC if we're at the end of the file
  806. if self._eof and self._running_crc != self._expected_crc:
  807. raise BadZipFile("Bad CRC-32 for file %r" % self.name)
  808. def read1(self, n):
  809. """Read up to n bytes with at most one read() system call."""
  810. if n is None or n < 0:
  811. buf = self._readbuffer[self._offset:]
  812. self._readbuffer = b''
  813. self._offset = 0
  814. while not self._eof:
  815. data = self._read1(self.MAX_N)
  816. if data:
  817. buf += data
  818. break
  819. return buf
  820. end = n + self._offset
  821. if end < len(self._readbuffer):
  822. buf = self._readbuffer[self._offset:end]
  823. self._offset = end
  824. return buf
  825. n = end - len(self._readbuffer)
  826. buf = self._readbuffer[self._offset:]
  827. self._readbuffer = b''
  828. self._offset = 0
  829. if n > 0:
  830. while not self._eof:
  831. data = self._read1(n)
  832. if n < len(data):
  833. self._readbuffer = data
  834. self._offset = n
  835. buf += data[:n]
  836. break
  837. if data:
  838. buf += data
  839. break
  840. return buf
    def _read1(self, n):
        # Read up to n compressed bytes with at most one read() system call,
        # decrypt and decompress them.
        if self._eof or n <= 0:
            return b''

        # Read from file.
        if self._compress_type == ZIP_DEFLATED:
            ## Handle unconsumed data.
            data = self._decompressor.unconsumed_tail
            if n > len(data):
                data += self._read2(n - len(data))
        else:
            data = self._read2(n)

        if self._compress_type == ZIP_STORED:
            # Stored data is not compressed: EOF when the raw bytes run out.
            self._eof = self._compress_left <= 0
        elif self._compress_type == ZIP_DEFLATED:
            n = max(n, self.MIN_READ_SIZE)
            data = self._decompressor.decompress(data, n)
            # EOF when zlib reports end-of-stream, or when the raw input is
            # exhausted and nothing remains in the unconsumed tail.
            self._eof = (self._decompressor.eof or
                         self._compress_left <= 0 and
                         not self._decompressor.unconsumed_tail)
            if self._eof:
                data += self._decompressor.flush()
        else:
            # bzip2 / LZMA decompressors take no length hint.
            data = self._decompressor.decompress(data)
            self._eof = self._decompressor.eof or self._compress_left <= 0

        # Never hand out more than the advertised uncompressed size.
        data = data[:self._left]
        self._left -= len(data)
        if self._left <= 0:
            self._eof = True
        self._update_crc(data)
        return data
  873. def _read2(self, n):
  874. if self._compress_left <= 0:
  875. return b''
  876. n = max(n, self.MIN_READ_SIZE)
  877. n = min(n, self._compress_left)
  878. data = self._fileobj.read(n)
  879. self._compress_left -= len(data)
  880. if not data:
  881. raise EOFError
  882. if self._decrypter is not None:
  883. data = self._decrypter(data)
  884. return data
    def close(self):
        """Close this stream, closing the underlying file only if we own it."""
        try:
            if self._close_fileobj:
                self._fileobj.close()
        finally:
            super().close()
  891. def seekable(self):
  892. if self.closed:
  893. raise ValueError("I/O operation on closed file.")
  894. return self._seekable
    def seek(self, offset, whence=0):
        """Seek to *offset* in the uncompressed stream.

        Seeking backwards past the read buffer forces a full rewind to the
        start of the compressed data followed by a forward re-read, since
        compressed streams cannot be repositioned arbitrarily.
        """
        if self.closed:
            raise ValueError("seek on closed file.")
        if not self._seekable:
            raise io.UnsupportedOperation("underlying stream is not seekable")
        curr_pos = self.tell()
        if whence == 0: # Seek from start of file
            new_pos = offset
        elif whence == 1: # Seek from current position
            new_pos = curr_pos + offset
        elif whence == 2: # Seek from EOF
            new_pos = self._orig_file_size + offset
        else:
            raise ValueError("whence must be os.SEEK_SET (0), "
                             "os.SEEK_CUR (1), or os.SEEK_END (2)")

        # Clamp the target position to [0, file size].
        if new_pos > self._orig_file_size:
            new_pos = self._orig_file_size

        if new_pos < 0:
            new_pos = 0

        read_offset = new_pos - curr_pos
        buff_offset = read_offset + self._offset

        if buff_offset >= 0 and buff_offset < len(self._readbuffer):
            # Just move the _offset index if the new position is in the _readbuffer
            self._offset = buff_offset
            read_offset = 0
        elif read_offset < 0:
            # Position is before the current position. Reset the ZipExtFile
            self._fileobj.seek(self._orig_compress_start)
            self._running_crc = self._orig_start_crc
            self._compress_left = self._orig_compress_size
            self._left = self._orig_file_size
            self._readbuffer = b''
            self._offset = 0
            self._decompressor = _get_decompressor(self._compress_type)
            self._eof = False
            read_offset = new_pos
            if self._decrypter is not None:
                self._init_decrypter()

        # Consume forward in bounded chunks until the target is reached.
        while read_offset > 0:
            read_len = min(self.MAX_SEEK_READ, read_offset)
            self.read(read_len)
            read_offset -= read_len

        return self.tell()
  938. def tell(self):
  939. if self.closed:
  940. raise ValueError("tell on closed file.")
  941. if not self._seekable:
  942. raise io.UnsupportedOperation("underlying stream is not seekable")
  943. filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
  944. return filepos
class _ZipWriteFile(io.BufferedIOBase):
    """Writable file-like object returned by ZipFile.open(..., mode='w').

    Compresses data as it is written and, on close(), records the final
    sizes and CRC either by rewriting the local file header in place
    (seekable output) or by appending a data descriptor (streaming output).
    """

    def __init__(self, zf, zinfo, zip64):
        self._zinfo = zinfo        # ZipInfo record being written
        self._zip64 = zip64        # whether ZIP64 header format was chosen
        self._zipfile = zf         # owning ZipFile instance
        self._compressor = _get_compressor(zinfo.compress_type,
                                           zinfo._compresslevel)
        self._file_size = 0        # uncompressed bytes written so far
        self._compress_size = 0    # compressed bytes written so far
        self._crc = 0              # running CRC-32 of the uncompressed data

    @property
    def _fileobj(self):
        # Always go through the ZipFile so a re-bound fp is picked up.
        return self._zipfile.fp

    def writable(self):
        return True

    def write(self, data):
        """Compress *data* and write it to the archive; return its length."""
        if self.closed:
            raise ValueError('I/O operation on closed file.')
        nbytes = len(data)
        self._file_size += nbytes
        self._crc = crc32(data, self._crc)
        if self._compressor:
            data = self._compressor.compress(data)
            self._compress_size += len(data)
        self._fileobj.write(data)
        return nbytes

    def close(self):
        """Flush the compressor and fix up the entry's header metadata."""
        if self.closed:
            return
        try:
            super().close()
            # Flush any data from the compressor, and update header info
            if self._compressor:
                buf = self._compressor.flush()
                self._compress_size += len(buf)
                self._fileobj.write(buf)
                self._zinfo.compress_size = self._compress_size
            else:
                self._zinfo.compress_size = self._file_size
            self._zinfo.CRC = self._crc
            self._zinfo.file_size = self._file_size

            # Write updated header info
            if self._zinfo.flag_bits & 0x08:
                # Write CRC and file sizes after the file data
                fmt = '<LLQQ' if self._zip64 else '<LLLL'
                self._fileobj.write(struct.pack(fmt, _DD_SIGNATURE, self._zinfo.CRC,
                    self._zinfo.compress_size, self._zinfo.file_size))
                self._zipfile.start_dir = self._fileobj.tell()
            else:
                if not self._zip64:
                    # The header was written without ZIP64 fields; sizes
                    # beyond the limit can no longer be represented.
                    if self._file_size > ZIP64_LIMIT:
                        raise RuntimeError(
                            'File size unexpectedly exceeded ZIP64 limit')
                    if self._compress_size > ZIP64_LIMIT:
                        raise RuntimeError(
                            'Compressed size unexpectedly exceeded ZIP64 limit')
                # Seek backwards and write file header (which will now include
                # correct CRC and file sizes)

                # Preserve current position in file
                self._zipfile.start_dir = self._fileobj.tell()
                self._fileobj.seek(self._zinfo.header_offset)
                self._fileobj.write(self._zinfo.FileHeader(self._zip64))
                self._fileobj.seek(self._zipfile.start_dir)

            # Successfully written: Add file to our caches
            self._zipfile.filelist.append(self._zinfo)
            self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
        finally:
            self._zipfile._writing = False
class ZipFile:
    """ Class with methods to open, read, write, close, list zip files.

    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True,
                compresslevel=None)

    file: Either the path to the file, or a file-like object.
          If it is a path, the file will be opened and closed by ZipFile.
    mode: The mode can be either read 'r', write 'w', exclusive create 'x',
          or append 'a'.
    compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
                 ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
    allowZip64: if True ZipFile will create files with ZIP64 extensions when
                needed, otherwise it will raise an exception when this would
                be necessary.
    compresslevel: None (default for the given compression type) or an integer
                   specifying the level to pass to the compressor.
                   When using ZIP_STORED or ZIP_LZMA this keyword has no effect.
                   When using ZIP_DEFLATED integers 0 through 9 are accepted.
                   When using ZIP_BZIP2 integers 1 through 9 are accepted.
    """

    fp = None                   # Set here since __del__ checks it
    _windows_illegal_name_trans_table = None

    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True,
                 compresslevel=None, *, strict_timestamps=True):
        """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
        or append 'a'."""
        if mode not in ('r', 'w', 'x', 'a'):
            raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")

        _check_compression(compression)

        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}    # Find file info given name
        self.filelist = []      # List of ZipInfo instances for archive
        self.compression = compression  # Method of compression
        self.compresslevel = compresslevel
        self.mode = mode
        self.pwd = None
        self._comment = b''
        self._strict_timestamps = strict_timestamps

        # Check if we were passed a file-like object
        if isinstance(file, os.PathLike):
            file = os.fspath(file)
        if isinstance(file, str):
            # No, it's a filename
            self._filePassed = 0
            self.filename = file
            # Each mode maps to a first-choice open mode; on OSError the
            # mapping is followed again to progressively weaker fallbacks
            # (e.g. 'a' on a missing file falls back from 'r+b' to 'w+b').
            modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
                        'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
            filemode = modeDict[mode]
            while True:
                try:
                    self.fp = io.open(file, filemode)
                except OSError:
                    if filemode in modeDict:
                        filemode = modeDict[filemode]
                        continue
                    raise
                break
        else:
            self._filePassed = 1
            self.fp = file
            self.filename = getattr(file, 'name', None)
        self._fileRefCnt = 1
        self._lock = threading.RLock()
        self._seekable = True
        self._writing = False

        try:
            if mode == 'r':
                self._RealGetContents()
            elif mode in ('w', 'x'):
                # set the modified flag so central directory gets written
                # even if no files are added to the archive
                self._didModify = True
                try:
                    self.start_dir = self.fp.tell()
                except (AttributeError, OSError):
                    self.fp = _Tellable(self.fp)
                    self.start_dir = 0
                    self._seekable = False
                else:
                    # Some file-like objects can provide tell() but not seek()
                    try:
                        self.fp.seek(self.start_dir)
                    except (AttributeError, OSError):
                        self._seekable = False
            elif mode == 'a':
                try:
                    # See if file is a zip file
                    self._RealGetContents()
                    # seek to start of directory and overwrite
                    self.fp.seek(self.start_dir)
                except BadZipFile:
                    # file is not a zip file, just append
                    self.fp.seek(0, 2)

                    # set the modified flag so central directory gets written
                    # even if no files are added to the archive
                    self._didModify = True
                    self.start_dir = self.fp.tell()
            else:
                raise ValueError("Mode must be 'r', 'w', 'x', or 'a'")
        except:
            # Initialization failed: release the file before re-raising.
            fp = self.fp
            self.fp = None
            self._fpclose(fp)
            raise

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __repr__(self):
        result = ['<%s.%s' % (self.__class__.__module__,
                              self.__class__.__qualname__)]
        if self.fp is not None:
            if self._filePassed:
                result.append(' file=%r' % self.fp)
            elif self.filename is not None:
                result.append(' filename=%r' % self.filename)
            result.append(' mode=%r' % self.mode)
        else:
            result.append(' [closed]')
        result.append('>')
        return ''.join(result)

    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file."""
        fp = self.fp
        try:
            endrec = _EndRecData(fp)
        except OSError:
            raise BadZipFile("File is not a zip file")
        if not endrec:
            raise BadZipFile("File is not a zip file")
        if self.debug > 1:
            print(endrec)
        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
        self._comment = endrec[_ECD_COMMENT]    # archive comment

        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)

        if self.debug > 2:
            inferred = concat + offset_cd
            print("given, inferred, offset", offset_cd, inferred, concat)
        # self.start_dir:  Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        data = fp.read(size_cd)
        fp = io.BytesIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if len(centdir) != sizeCentralDir:
                raise BadZipFile("Truncated central directory")
            centdir = struct.unpack(structCentralDir, centdir)
            if centdir[_CD_SIGNATURE] != stringCentralDir:
                raise BadZipFile("Bad magic number for central directory")
            if self.debug > 2:
                print(centdir)
            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            flags = centdir[5]
            if flags & 0x800:
                # UTF-8 file names extension
                filename = filename.decode('utf-8')
            else:
                # Historical ZIP filename encoding
                filename = filename.decode('cp437')
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (x.create_version, x.create_system, x.extract_version, x.reserved,
             x.flag_bits, x.compress_type, t, d,
             x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            if x.extract_version > MAX_EXTRACT_VERSION:
                raise NotImplementedError("zip file version %.1f" %
                                          (x.extract_version / 10))
            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
            # Convert date/time code to (year, month, day, hour, min, sec)
            x._raw_time = t
            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
                            t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )

            x._decodeExtra()
            x.header_offset = x.header_offset + concat
            self.filelist.append(x)
            self.NameToInfo[x.filename] = x

            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                     + centdir[_CD_EXTRA_FIELD_LENGTH]
                     + centdir[_CD_COMMENT_LENGTH])

            if self.debug > 2:
                print("total", total)

    def namelist(self):
        """Return a list of file names in the archive."""
        return [data.filename for data in self.filelist]

    def infolist(self):
        """Return a list of class ZipInfo instances for files in the
        archive."""
        return self.filelist

    def printdir(self, file=None):
        """Print a table of contents for the zip file."""
        print("%-46s %19s %12s" % ("File Name", "Modified    ", "Size"),
              file=file)
        for zinfo in self.filelist:
            date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
            print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
                  file=file)

    def testzip(self):
        """Read all the files and check the CRC."""
        chunk_size = 2 ** 20
        for zinfo in self.filelist:
            try:
                # Read by chunks, to avoid an OverflowError or a
                # MemoryError with very large embedded files.
                with self.open(zinfo.filename, "r") as f:
                    while f.read(chunk_size):     # Check CRC-32
                        pass
            except BadZipFile:
                return zinfo.filename

    def getinfo(self, name):
        """Return the instance of ZipInfo given 'name'."""
        info = self.NameToInfo.get(name)
        if info is None:
            raise KeyError(
                'There is no item named %r in the archive' % name)

        return info

    def setpassword(self, pwd):
        """Set default password for encrypted files."""
        if pwd and not isinstance(pwd, bytes):
            raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
        if pwd:
            self.pwd = pwd
        else:
            self.pwd = None

    @property
    def comment(self):
        """The comment text associated with the ZIP file."""
        return self._comment

    @comment.setter
    def comment(self, comment):
        if not isinstance(comment, bytes):
            raise TypeError("comment: expected bytes, got %s" % type(comment).__name__)
        # check for valid comment length
        if len(comment) > ZIP_MAX_COMMENT:
            import warnings
            warnings.warn('Archive comment is too long; truncating to %d bytes'
                          % ZIP_MAX_COMMENT, stacklevel=2)
            comment = comment[:ZIP_MAX_COMMENT]
        self._comment = comment
        self._didModify = True

    def read(self, name, pwd=None):
        """Return file bytes for name."""
        with self.open(name, "r", pwd) as fp:
            return fp.read()

    def open(self, name, mode="r", pwd=None, *, force_zip64=False):
        """Return file-like object for 'name'.

        name is a string for the file name within the ZIP file, or a ZipInfo
        object.

        mode should be 'r' to read a file already in the ZIP file, or 'w' to
        write to a file newly added to the archive.

        pwd is the password to decrypt files (only used for reading).

        When writing, if the file size is not known in advance but may exceed
        2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large
        files.  If the size is known in advance, it is best to pass a ZipInfo
        instance for name, with zinfo.file_size set.
        """
        if mode not in {"r", "w"}:
            raise ValueError('open() requires mode "r" or "w"')
        if pwd and not isinstance(pwd, bytes):
            raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
        if pwd and (mode == "w"):
            raise ValueError("pwd is only supported for reading files")
        if not self.fp:
            raise ValueError(
                "Attempt to use ZIP archive that was already closed")

        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            # 'name' is already an info object
            zinfo = name
        elif mode == 'w':
            zinfo = ZipInfo(name)
            zinfo.compress_type = self.compression
            zinfo._compresslevel = self.compresslevel
        else:
            # Get info object for name
            zinfo = self.getinfo(name)

        if mode == 'w':
            return self._open_to_write(zinfo, force_zip64=force_zip64)

        if self._writing:
            raise ValueError("Can't read from the ZIP file while there "
                    "is an open writing handle on it. "
                    "Close the writing handle before trying to read.")

        # Open for reading:
        self._fileRefCnt += 1
        zef_file = _SharedFile(self.fp, zinfo.header_offset,
                               self._fpclose, self._lock, lambda: self._writing)
        try:
            # Skip the file header:
            fheader = zef_file.read(sizeFileHeader)
            if len(fheader) != sizeFileHeader:
                raise BadZipFile("Truncated file header")
            fheader = struct.unpack(structFileHeader, fheader)
            if fheader[_FH_SIGNATURE] != stringFileHeader:
                raise BadZipFile("Bad magic number for file header")

            fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
            if fheader[_FH_EXTRA_FIELD_LENGTH]:
                zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])

            if zinfo.flag_bits & 0x20:
                # Zip 2.7: compressed patched data
                raise NotImplementedError("compressed patched data (flag bit 5)")

            if zinfo.flag_bits & 0x40:
                # strong encryption
                raise NotImplementedError("strong encryption (flag bit 6)")

            if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & 0x800:
                # UTF-8 filename
                fname_str = fname.decode("utf-8")
            else:
                fname_str = fname.decode("cp437")

            # The local header's name must agree with the central directory.
            if fname_str != zinfo.orig_filename:
                raise BadZipFile(
                    'File name in directory %r and header %r differ.'
                    % (zinfo.orig_filename, fname))

            # check for encrypted flag & handle password
            is_encrypted = zinfo.flag_bits & 0x1
            if is_encrypted:
                if not pwd:
                    pwd = self.pwd
                if not pwd:
                    raise RuntimeError("File %r is encrypted, password "
                                       "required for extraction" % name)
            else:
                pwd = None

            return ZipExtFile(zef_file, mode, zinfo, pwd, True)
        except:
            zef_file.close()
            raise

    def _open_to_write(self, zinfo, force_zip64=False):
        """Return a _ZipWriteFile for streaming a new member into the archive."""
        if force_zip64 and not self._allowZip64:
            raise ValueError(
                "force_zip64 is True, but allowZip64 was False when opening "
                "the ZIP file."
            )
        if self._writing:
            raise ValueError("Can't write to the ZIP file while there is "
                             "another write handle open on it. "
                             "Close the first handle before opening another.")

        # Size and CRC are overwritten with correct data after processing the file
        zinfo.compress_size = 0
        zinfo.CRC = 0
        zinfo.flag_bits = 0x00
        if zinfo.compress_type == ZIP_LZMA:
            # Compressed data includes an end-of-stream (EOS) marker
            zinfo.flag_bits |= 0x02
        if not self._seekable:
            # Cannot rewrite the header later: use a data descriptor instead.
            zinfo.flag_bits |= 0x08

        if not zinfo.external_attr:
            zinfo.external_attr = 0o600 << 16  # permissions: ?rw-------

        # Compressed size can be larger than uncompressed size
        zip64 = self._allowZip64 and \
                (force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT)

        if self._seekable:
            self.fp.seek(self.start_dir)
        zinfo.header_offset = self.fp.tell()

        self._writecheck(zinfo)
        self._didModify = True

        self.fp.write(zinfo.FileHeader(zip64))

        self._writing = True
        return _ZipWriteFile(self, zinfo, zip64)

    def extract(self, member, path=None, pwd=None):
        """Extract a member from the archive to the current working directory,
           using its full name. Its file information is extracted as accurately
           as possible. `member' may be a filename or a ZipInfo object. You can
           specify a different directory using `path'.
        """
        if path is None:
            path = os.getcwd()
        else:
            path = os.fspath(path)

        return self._extract_member(member, path, pwd)

    def extractall(self, path=None, members=None, pwd=None):
        """Extract all members from the archive to the current working
           directory. `path' specifies a different directory to extract to.
           `members' is optional and must be a subset of the list returned
           by namelist().
        """
        if members is None:
            members = self.namelist()

        if path is None:
            path = os.getcwd()
        else:
            path = os.fspath(path)

        for zipinfo in members:
            self._extract_member(zipinfo, path, pwd)

    @classmethod
    def _sanitize_windows_name(cls, arcname, pathsep):
        """Replace bad characters and remove trailing dots from parts."""
        table = cls._windows_illegal_name_trans_table
        if not table:
            illegal = ':<>|"?*'
            table = str.maketrans(illegal, '_' * len(illegal))
            cls._windows_illegal_name_trans_table = table
        arcname = arcname.translate(table)
        # remove trailing dots
        arcname = (x.rstrip('.') for x in arcname.split(pathsep))
        # rejoin, removing empty parts.
        arcname = pathsep.join(x for x in arcname if x)
        return arcname

    def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
           file on the path targetpath.
        """
        if not isinstance(member, ZipInfo):
            member = self.getinfo(member)

        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        arcname = member.filename.replace('/', os.path.sep)

        if os.path.altsep:
            arcname = arcname.replace(os.path.altsep, os.path.sep)
        # interpret absolute pathname as relative, remove drive letter or
        # UNC path, redundant separators, "." and ".." components.
        arcname = os.path.splitdrive(arcname)[1]
        invalid_path_parts = ('', os.path.curdir, os.path.pardir)
        arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                                   if x not in invalid_path_parts)
        if os.path.sep == '\\':
            # filter illegal characters on Windows
            arcname = self._sanitize_windows_name(arcname, os.path.sep)

        targetpath = os.path.join(targetpath, arcname)
        targetpath = os.path.normpath(targetpath)

        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)

        if member.is_dir():
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath

        with self.open(member, pwd=pwd) as source, \
             open(targetpath, "wb") as target:
            shutil.copyfileobj(source, target)

        return targetpath

    def _writecheck(self, zinfo):
        """Check for errors before writing a file to the archive."""
        if zinfo.filename in self.NameToInfo:
            import warnings
            warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
        if self.mode not in ('w', 'x', 'a'):
            raise ValueError("write() requires mode 'w', 'x', or 'a'")
        if not self.fp:
            raise ValueError(
                "Attempt to write ZIP archive that was already closed")
        _check_compression(zinfo.compress_type)
        if not self._allowZip64:
            requires_zip64 = None
            if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
                requires_zip64 = "Files count"
            elif zinfo.file_size > ZIP64_LIMIT:
                requires_zip64 = "Filesize"
            elif zinfo.header_offset > ZIP64_LIMIT:
                requires_zip64 = "Zipfile size"
            if requires_zip64:
                raise LargeZipFile(requires_zip64 +
                                   " would require ZIP64 extensions")

    def write(self, filename, arcname=None,
              compress_type=None, compresslevel=None):
        """Put the bytes from filename into the archive under the name
        arcname."""
        if not self.fp:
            raise ValueError(
                "Attempt to write to ZIP archive that was already closed")
        if self._writing:
            raise ValueError(
                "Can't write to ZIP archive while an open writing handle exists"
            )

        zinfo = ZipInfo.from_file(filename, arcname,
                                  strict_timestamps=self._strict_timestamps)

        if zinfo.is_dir():
            zinfo.compress_size = 0
            zinfo.CRC = 0
        else:
            if compress_type is not None:
                zinfo.compress_type = compress_type
            else:
                zinfo.compress_type = self.compression

            if compresslevel is not None:
                zinfo._compresslevel = compresslevel
            else:
                zinfo._compresslevel = self.compresslevel

        if zinfo.is_dir():
            # Directories are written as a bare header with no data.
            with self._lock:
                if self._seekable:
                    self.fp.seek(self.start_dir)
                zinfo.header_offset = self.fp.tell()  # Start of header bytes
                if zinfo.compress_type == ZIP_LZMA:
                    # Compressed data includes an end-of-stream (EOS) marker
                    zinfo.flag_bits |= 0x02

                self._writecheck(zinfo)
                self._didModify = True

                self.filelist.append(zinfo)
                self.NameToInfo[zinfo.filename] = zinfo
                self.fp.write(zinfo.FileHeader(False))
                self.start_dir = self.fp.tell()
        else:
            with open(filename, "rb") as src, self.open(zinfo, 'w') as dest:
                shutil.copyfileobj(src, dest, 1024*8)

    def writestr(self, zinfo_or_arcname, data,
                 compress_type=None, compresslevel=None):
        """Write a file into the archive.  The contents is 'data', which
        may be either a 'str' or a 'bytes' instance; if it is a 'str',
        it is encoded as UTF-8 first.
        'zinfo_or_arcname' is either a ZipInfo instance or
        the name of the file in the archive."""
        if isinstance(data, str):
            data = data.encode("utf-8")
        if not isinstance(zinfo_or_arcname, ZipInfo):
            zinfo = ZipInfo(filename=zinfo_or_arcname,
                            date_time=time.localtime(time.time())[:6])
            zinfo.compress_type = self.compression
            zinfo._compresslevel = self.compresslevel
            if zinfo.filename[-1] == '/':
                zinfo.external_attr = 0o40775 << 16   # drwxrwxr-x
                zinfo.external_attr |= 0x10           # MS-DOS directory flag
            else:
                zinfo.external_attr = 0o600 << 16     # ?rw-------
        else:
            zinfo = zinfo_or_arcname

        if not self.fp:
            raise ValueError(
                "Attempt to write to ZIP archive that was already closed")
        if self._writing:
            raise ValueError(
                "Can't write to ZIP archive while an open writing handle exists."
            )

        if compress_type is not None:
            zinfo.compress_type = compress_type

        if compresslevel is not None:
            zinfo._compresslevel = compresslevel

        zinfo.file_size = len(data)            # Uncompressed size
        with self._lock:
            with self.open(zinfo, mode='w') as dest:
                dest.write(data)

    def __del__(self):
        """Call the "close()" method in case the user forgot."""
        self.close()

    def close(self):
        """Close the file, and for mode 'w', 'x' and 'a' write the ending
        records."""
        if self.fp is None:
            return

        if self._writing:
            raise ValueError("Can't close the ZIP file while there is "
                             "an open writing handle on it. "
                             "Close the writing handle before closing the zip.")

        try:
            if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
                with self._lock:
                    if self._seekable:
                        self.fp.seek(self.start_dir)
                    self._write_end_record()
        finally:
            fp = self.fp
            self.fp = None
            self._fpclose(fp)

    def _write_end_record(self):
        """Write the central directory and end-of-archive record(s)."""
        for zinfo in self.filelist:         # write central directory
            dt = zinfo.date_time
            dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
            dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
            extra = []
            if zinfo.file_size > ZIP64_LIMIT \
               or zinfo.compress_size > ZIP64_LIMIT:
                # Real sizes go into a ZIP64 extra field; the fixed-width
                # header fields hold the 0xffffffff sentinel.
                extra.append(zinfo.file_size)
                extra.append(zinfo.compress_size)
                file_size = 0xffffffff
                compress_size = 0xffffffff
            else:
                file_size = zinfo.file_size
                compress_size = zinfo.compress_size

            if zinfo.header_offset > ZIP64_LIMIT:
                extra.append(zinfo.header_offset)
                header_offset = 0xffffffff
            else:
                header_offset = zinfo.header_offset

            extra_data = zinfo.extra
            min_version = 0
            if extra:
                # Append a ZIP64 field to the extra's
                extra_data = _strip_extra(extra_data, (1,))
                extra_data = struct.pack(
                    '<HH' + 'Q'*len(extra),
                    1, 8*len(extra), *extra) + extra_data

                min_version = ZIP64_VERSION

            if zinfo.compress_type == ZIP_BZIP2:
                min_version = max(BZIP2_VERSION, min_version)
            elif zinfo.compress_type == ZIP_LZMA:
                min_version = max(LZMA_VERSION, min_version)

            extract_version = max(min_version, zinfo.extract_version)
            create_version = max(min_version, zinfo.create_version)
            filename, flag_bits = zinfo._encodeFilenameFlags()
            centdir = struct.pack(structCentralDir,
                                  stringCentralDir, create_version,
                                  zinfo.create_system, extract_version, zinfo.reserved,
                                  flag_bits, zinfo.compress_type, dostime, dosdate,
                                  zinfo.CRC, compress_size, file_size,
                                  len(filename), len(extra_data), len(zinfo.comment),
                                  0, zinfo.internal_attr, zinfo.external_attr,
                                  header_offset)
            self.fp.write(centdir)
            self.fp.write(filename)
            self.fp.write(extra_data)
            self.fp.write(zinfo.comment)

        pos2 = self.fp.tell()
        # Write end-of-zip-archive record
        centDirCount = len(self.filelist)
        centDirSize = pos2 - self.start_dir
        centDirOffset = self.start_dir
        requires_zip64 = None
        if centDirCount > ZIP_FILECOUNT_LIMIT:
            requires_zip64 = "Files count"
        elif centDirOffset > ZIP64_LIMIT:
            requires_zip64 = "Central directory offset"
        elif centDirSize > ZIP64_LIMIT:
            requires_zip64 = "Central directory size"
        if requires_zip64:
            # Need to write the ZIP64 end-of-archive records
            if not self._allowZip64:
                raise LargeZipFile(requires_zip64 +
                                   " would require ZIP64 extensions")
            zip64endrec = struct.pack(
                structEndArchive64, stringEndArchive64,
                44, 45, 45, 0, 0, centDirCount, centDirCount,
                centDirSize, centDirOffset)
            self.fp.write(zip64endrec)

            zip64locrec = struct.pack(
                structEndArchive64Locator,
                stringEndArchive64Locator, 0, pos2, 1)
            self.fp.write(zip64locrec)
            # Clamp values in the classic record to their field widths; the
            # real values live in the ZIP64 records written above.
            centDirCount = min(centDirCount, 0xFFFF)
            centDirSize = min(centDirSize, 0xFFFFFFFF)
            centDirOffset = min(centDirOffset, 0xFFFFFFFF)

        endrec = struct.pack(structEndArchive, stringEndArchive,
                             0, 0, centDirCount, centDirCount,
                             centDirSize, centDirOffset, len(self._comment))
        self.fp.write(endrec)
        self.fp.write(self._comment)
        if self.mode == "a":
            # Drop any stale bytes left over from the overwritten directory.
            self.fp.truncate()
        self.fp.flush()

    def _fpclose(self, fp):
        # Reference-counted close: the underlying file is shared with any
        # still-open member handles and closed only by the last user.
        assert self._fileRefCnt > 0
        self._fileRefCnt -= 1
        if not self._fileRefCnt and not self._filePassed:
            fp.close()
class PyZipFile(ZipFile):
    """Class to create ZIP archives with Python library files and packages."""

    def __init__(self, file, mode="r", compression=ZIP_STORED,
                 allowZip64=True, optimize=-1):
        ZipFile.__init__(self, file, mode=mode, compression=compression,
                         allowZip64=allowZip64)
        # Bytecode optimization level for writepy(); -1 selects the
        # legacy behavior of using whatever compiled file is present.
        self._optimize = optimize

    def writepy(self, pathname, basename="", filterfunc=None):
        """Add all files from "pathname" to the ZIP archive.

        If pathname is a package directory, search the directory and
        all package subdirectories recursively for all *.py and enter
        the modules into the archive.  If pathname is a plain
        directory, listdir *.py and enter all modules.  Else, pathname
        must be a Python *.py file and the module will be put into the
        archive.  Added modules are always module.pyc.
        This method will compile the module.py into module.pyc if
        necessary.
        If filterfunc(pathname) is given, it is called with every argument.
        When it is False, the file or directory is skipped.
        """
        pathname = os.fspath(pathname)
        if filterfunc and not filterfunc(pathname):
            if self.debug:
                label = 'path' if os.path.isdir(pathname) else 'file'
                print('%s %r skipped by filterfunc' % (label, pathname))
            return
        # NOTE: 'dir' shadows the builtin; kept for compatibility.
        dir, name = os.path.split(pathname)
        if os.path.isdir(pathname):
            initname = os.path.join(pathname, "__init__.py")
            if os.path.isfile(initname):
                # This is a package directory, add it
                if basename:
                    basename = "%s/%s" % (basename, name)
                else:
                    basename = name
                if self.debug:
                    print("Adding package in", pathname, "as", basename)
                # strip the ".py" suffix before resolving the bytecode file
                fname, arcname = self._get_codename(initname[0:-3], basename)
                if self.debug:
                    print("Adding", arcname)
                self.write(fname, arcname)
                dirlist = sorted(os.listdir(pathname))
                # __init__.py was already written above
                dirlist.remove("__init__.py")
                # Add all *.py files and package subdirectories
                for filename in dirlist:
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if os.path.isdir(path):
                        if os.path.isfile(os.path.join(path, "__init__.py")):
                            # This is a package directory, add it
                            self.writepy(path, basename,
                                         filterfunc=filterfunc)  # Recursive call
                    elif ext == ".py":
                        if filterfunc and not filterfunc(path):
                            if self.debug:
                                print('file %r skipped by filterfunc' % path)
                            continue
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print("Adding", arcname)
                        self.write(fname, arcname)
            else:
                # This is NOT a package directory, add its files at top level
                if self.debug:
                    print("Adding files from directory", pathname)
                for filename in sorted(os.listdir(pathname)):
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if ext == ".py":
                        if filterfunc and not filterfunc(path):
                            if self.debug:
                                print('file %r skipped by filterfunc' % path)
                            continue
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print("Adding", arcname)
                        self.write(fname, arcname)
        else:
            # Single file: must be a .py source file
            if pathname[-3:] != ".py":
                raise RuntimeError(
                    'Files added with writepy() must end with ".py"')
            fname, arcname = self._get_codename(pathname[0:-3], basename)
            if self.debug:
                print("Adding file", arcname)
            self.write(fname, arcname)

    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.

        Given a module name path, return the correct file path and
        archive name, compiling if necessary.  For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        """
        def _compile(file, optimize=-1):
            # Compile *file* to bytecode; returns False (after printing
            # the compile error) instead of raising on failure.
            import py_compile
            if self.debug:
                print("Compiling", file)
            try:
                py_compile.compile(file, doraise=True, optimize=optimize)
            except py_compile.PyCompileError as err:
                print(err.msg)
                return False
            return True

        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        # PEP 3147 __pycache__ locations for each optimization level.
        pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='')
        pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1)
        pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2)
        if self._optimize == -1:
            # legacy mode: use whatever file is present
            if (os.path.isfile(file_pyc) and
                  os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
                # Use .pyc file.
                arcname = fname = file_pyc
            elif (os.path.isfile(pycache_opt0) and
                  os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                # file name in the archive.
                fname = pycache_opt0
                arcname = file_pyc
            elif (os.path.isfile(pycache_opt1) and
                  os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                # file name in the archive.
                fname = pycache_opt1
                arcname = file_pyc
            elif (os.path.isfile(pycache_opt2) and
                  os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                # file name in the archive.
                fname = pycache_opt2
                arcname = file_pyc
            else:
                # Compile py into PEP 3147 pyc file.
                if _compile(file_py):
                    # pick the cache file matching the running interpreter's
                    # optimization level
                    if sys.flags.optimize == 0:
                        fname = pycache_opt0
                    elif sys.flags.optimize == 1:
                        fname = pycache_opt1
                    else:
                        fname = pycache_opt2
                    arcname = file_pyc
                else:
                    # compilation failed: fall back to shipping the source
                    fname = arcname = file_py
        else:
            # new mode: use given optimization level
            if self._optimize == 0:
                fname = pycache_opt0
                arcname = file_pyc
            else:
                arcname = file_pyc
                if self._optimize == 1:
                    fname = pycache_opt1
                elif self._optimize == 2:
                    fname = pycache_opt2
                else:
                    msg = "invalid value for 'optimize': {!r}".format(self._optimize)
                    raise ValueError(msg)
            # (Re)compile when the cached bytecode is missing or stale.
            if not (os.path.isfile(fname) and
                    os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
                if not _compile(file_py, optimize=self._optimize):
                    fname = arcname = file_py
        archivename = os.path.split(arcname)[1]
        if basename:
            archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)
  1833. def _parents(path):
  1834. """
  1835. Given a path with elements separated by
  1836. posixpath.sep, generate all parents of that path.
  1837. >>> list(_parents('b/d'))
  1838. ['b']
  1839. >>> list(_parents('/b/d/'))
  1840. ['/b']
  1841. >>> list(_parents('b/d/f/'))
  1842. ['b/d', 'b']
  1843. >>> list(_parents('b'))
  1844. []
  1845. >>> list(_parents(''))
  1846. []
  1847. """
  1848. return itertools.islice(_ancestry(path), 1, None)
  1849. def _ancestry(path):
  1850. """
  1851. Given a path with elements separated by
  1852. posixpath.sep, generate all elements of that path
  1853. >>> list(_ancestry('b/d'))
  1854. ['b/d', 'b']
  1855. >>> list(_ancestry('/b/d/'))
  1856. ['/b/d', '/b']
  1857. >>> list(_ancestry('b/d/f/'))
  1858. ['b/d/f', 'b/d', 'b']
  1859. >>> list(_ancestry('b'))
  1860. ['b']
  1861. >>> list(_ancestry(''))
  1862. []
  1863. """
  1864. path = path.rstrip(posixpath.sep)
  1865. while path and path != posixpath.sep:
  1866. yield path
  1867. path, tail = posixpath.split(path)
# dict keys are insertion-ordered and unique, so dict.fromkeys both
# deduplicates and preserves first-seen order in one C-level pass.
_dedupe = dict.fromkeys
"""Deduplicate an iterable in original order"""
  1870. def _difference(minuend, subtrahend):
  1871. """
  1872. Return items in minuend not in subtrahend, retaining order
  1873. with O(1) lookup.
  1874. """
  1875. return itertools.filterfalse(set(subtrahend).__contains__, minuend)
  1876. class CompleteDirs(ZipFile):
  1877. """
  1878. A ZipFile subclass that ensures that implied directories
  1879. are always included in the namelist.
  1880. """
  1881. @staticmethod
  1882. def _implied_dirs(names):
  1883. parents = itertools.chain.from_iterable(map(_parents, names))
  1884. as_dirs = (p + posixpath.sep for p in parents)
  1885. return _dedupe(_difference(as_dirs, names))
  1886. def namelist(self):
  1887. names = super(CompleteDirs, self).namelist()
  1888. return names + list(self._implied_dirs(names))
  1889. def _name_set(self):
  1890. return set(self.namelist())
  1891. def resolve_dir(self, name):
  1892. """
  1893. If the name represents a directory, return that name
  1894. as a directory (with the trailing slash).
  1895. """
  1896. names = self._name_set()
  1897. dirname = name + '/'
  1898. dir_match = name not in names and dirname in names
  1899. return dirname if dir_match else name
  1900. @classmethod
  1901. def make(cls, source):
  1902. """
  1903. Given a source (filename or zipfile), return an
  1904. appropriate CompleteDirs subclass.
  1905. """
  1906. if isinstance(source, CompleteDirs):
  1907. return source
  1908. if not isinstance(source, ZipFile):
  1909. return cls(source)
  1910. # Only allow for FastLookup when supplied zipfile is read-only
  1911. if 'r' not in source.mode:
  1912. cls = CompleteDirs
  1913. source.__class__ = cls
  1914. return source
  1915. class FastLookup(CompleteDirs):
  1916. """
  1917. ZipFile subclass to ensure implicit
  1918. dirs exist and are resolved rapidly.
  1919. """
  1920. def namelist(self):
  1921. with contextlib.suppress(AttributeError):
  1922. return self.__names
  1923. self.__names = super(FastLookup, self).namelist()
  1924. return self.__names
  1925. def _name_set(self):
  1926. with contextlib.suppress(AttributeError):
  1927. return self.__lookup
  1928. self.__lookup = super(FastLookup, self)._name_set()
  1929. return self.__lookup
class Path:
    """
    A pathlib-compatible interface for zip files.

    Consider a zip file with this structure::

        .
        ├── a.txt
        └── b
            ├── c.txt
            └── d
                └── e.txt

    >>> data = io.BytesIO()
    >>> zf = ZipFile(data, 'w')
    >>> zf.writestr('a.txt', 'content of a')
    >>> zf.writestr('b/c.txt', 'content of c')
    >>> zf.writestr('b/d/e.txt', 'content of e')
    >>> zf.filename = 'mem/abcde.zip'

    Path accepts the zipfile object itself or a filename

    >>> root = Path(zf)

    From there, several path operations are available.

    Directory iteration (including the zip file itself):

    >>> a, b = root.iterdir()
    >>> a
    Path('mem/abcde.zip', 'a.txt')
    >>> b
    Path('mem/abcde.zip', 'b/')

    name property:

    >>> b.name
    'b'

    join with divide operator:

    >>> c = b / 'c.txt'
    >>> c
    Path('mem/abcde.zip', 'b/c.txt')
    >>> c.name
    'c.txt'

    Read text:

    >>> c.read_text()
    'content of c'

    existence:

    >>> c.exists()
    True
    >>> (b / 'missing.txt').exists()
    False

    Coercion to string:

    >>> import os
    >>> str(c).replace(os.sep, posixpath.sep)
    'mem/abcde.zip/b/c.txt'

    At the root, ``name``, ``filename``, and ``parent``
    resolve to the zipfile. Note these attributes are not
    valid and will raise a ``ValueError`` if the zipfile
    has no filename.

    >>> root.name
    'abcde.zip'
    >>> str(root.filename).replace(os.sep, posixpath.sep)
    'mem/abcde.zip'
    >>> str(root.parent)
    'mem'
    """

    # Template used by __repr__, formatted against self below.
    __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"

    def __init__(self, root, at=""):
        """
        Construct a Path from a ZipFile or filename.

        Note: When the source is an existing ZipFile object,
        its type (__class__) will be mutated to a
        specialized type. If the caller wishes to retain the
        original type, the caller should either create a
        separate ZipFile object or pass a filename.
        """
        self.root = FastLookup.make(root)
        self.at = at

    def open(self, mode='r', *args, pwd=None, **kwargs):
        """
        Open this entry as text or binary following the semantics
        of ``pathlib.Path.open()`` by passing arguments through
        to io.TextIOWrapper().
        """
        if self.is_dir():
            raise IsADirectoryError(self)
        zip_mode = mode[0]
        if not self.exists() and zip_mode == 'r':
            raise FileNotFoundError(self)
        stream = self.root.open(self.at, zip_mode, pwd=pwd)
        if 'b' in mode:
            if args or kwargs:
                # TextIOWrapper arguments make no sense for a binary stream.
                raise ValueError("encoding args invalid for binary operation")
            return stream
        else:
            # Text mode: resolve the default encoding, then wrap the raw
            # stream, forwarding any TextIOWrapper arguments.
            kwargs["encoding"] = io.text_encoding(kwargs.get("encoding"))
            return io.TextIOWrapper(stream, *args, **kwargs)

    @property
    def name(self):
        # Last path component; at the archive root (empty self.at) fall
        # back to the zip file's own name.
        return pathlib.Path(self.at).name or self.filename.name

    @property
    def filename(self):
        # The zip file's filename joined with this entry's in-archive path.
        return pathlib.Path(self.root.filename).joinpath(self.at)

    def read_text(self, *args, **kwargs):
        """Open the entry in text mode and return its full contents."""
        kwargs["encoding"] = io.text_encoding(kwargs.get("encoding"))
        with self.open('r', *args, **kwargs) as strm:
            return strm.read()

    def read_bytes(self):
        """Open the entry in binary mode and return its full contents."""
        with self.open('rb') as strm:
            return strm.read()

    def _is_child(self, path):
        # True when *path* is an immediate child of this directory
        # (trailing slashes ignored on both sides).
        return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")

    def _next(self, at):
        # Derive a new Path at *at* sharing the same underlying zipfile.
        return self.__class__(self.root, at)

    def is_dir(self):
        # The archive root ('' at) and any name ending in '/' count as
        # directories.
        return not self.at or self.at.endswith("/")

    def is_file(self):
        return self.exists() and not self.is_dir()

    def exists(self):
        return self.at in self.root._name_set()

    def iterdir(self):
        """Iterate over the immediate children of this directory."""
        if not self.is_dir():
            raise ValueError("Can't listdir a file")
        subs = map(self._next, self.root.namelist())
        return filter(self._is_child, subs)

    def __str__(self):
        return posixpath.join(self.root.filename, self.at)

    def __repr__(self):
        return self.__repr.format(self=self)

    def joinpath(self, *other):
        """Combine this path with one or more child path names."""
        # NOTE: 'next' shadows the builtin; kept for compatibility.
        next = posixpath.join(self.at, *other)
        return self._next(self.root.resolve_dir(next))

    __truediv__ = joinpath

    @property
    def parent(self):
        if not self.at:
            # At the archive root, the parent is the filesystem directory
            # containing the zip file itself.
            return self.filename.parent
        parent_at = posixpath.dirname(self.at.rstrip('/'))
        if parent_at:
            # Non-root parents keep the trailing slash of directory form.
            parent_at += '/'
        return self._next(parent_at)
  2062. def main(args=None):
  2063. import argparse
  2064. description = 'A simple command-line interface for zipfile module.'
  2065. parser = argparse.ArgumentParser(description=description)
  2066. group = parser.add_mutually_exclusive_group(required=True)
  2067. group.add_argument('-l', '--list', metavar='<zipfile>',
  2068. help='Show listing of a zipfile')
  2069. group.add_argument('-e', '--extract', nargs=2,
  2070. metavar=('<zipfile>', '<output_dir>'),
  2071. help='Extract zipfile into target dir')
  2072. group.add_argument('-c', '--create', nargs='+',
  2073. metavar=('<name>', '<file>'),
  2074. help='Create zipfile from sources')
  2075. group.add_argument('-t', '--test', metavar='<zipfile>',
  2076. help='Test if a zipfile is valid')
  2077. args = parser.parse_args(args)
  2078. if args.test is not None:
  2079. src = args.test
  2080. with ZipFile(src, 'r') as zf:
  2081. badfile = zf.testzip()
  2082. if badfile:
  2083. print("The following enclosed file is corrupted: {!r}".format(badfile))
  2084. print("Done testing")
  2085. elif args.list is not None:
  2086. src = args.list
  2087. with ZipFile(src, 'r') as zf:
  2088. zf.printdir()
  2089. elif args.extract is not None:
  2090. src, curdir = args.extract
  2091. with ZipFile(src, 'r') as zf:
  2092. zf.extractall(curdir)
  2093. elif args.create is not None:
  2094. zip_name = args.create.pop(0)
  2095. files = args.create
  2096. def addToZip(zf, path, zippath):
  2097. if os.path.isfile(path):
  2098. zf.write(path, zippath, ZIP_DEFLATED)
  2099. elif os.path.isdir(path):
  2100. if zippath:
  2101. zf.write(path, zippath)
  2102. for nm in sorted(os.listdir(path)):
  2103. addToZip(zf,
  2104. os.path.join(path, nm), os.path.join(zippath, nm))
  2105. # else: ignore
  2106. with ZipFile(zip_name, 'w') as zf:
  2107. for path in files:
  2108. zippath = os.path.basename(path)
  2109. if not zippath:
  2110. zippath = os.path.basename(os.path.dirname(path))
  2111. if zippath in ('', os.curdir, os.pardir):
  2112. zippath = ''
  2113. addToZip(zf, path, zippath)
# Run the command-line interface when executed as a script
# (e.g. "python -m zipfile ...").
if __name__ == "__main__":
    main()