
live-bootstrap

Mirror of <https://github.com/fosslinux/live-bootstrap>

py2.patch (18235B)


SPDX-FileCopyrightText: 2022 fosslinux <fosslinux@aussies.space>
SPDX-License-Identifier: PSF-2.0

We are building Python 3 using Python 2 as our bootstrap interpreter,
but makeunicodedata.py has been converted to Python 3. We need to
convert it back to Python 2 syntax, in particular the print statements
and the code that writes to files.
This patch is applied only to the first build.
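
The conversion is mechanical. Two representative pairs, taken from the
hunks below, show the whole pattern: a bare Python 3 print() call becomes
a Python 2 print statement, and print(..., file=fp) becomes an explicit
fp.write() with the newline spelled out. (FILE and fp are names from
makeunicodedata.py itself.)

    # Python 3 form, as shipped in Python-3.1.5:
    print("--- Preparing", FILE, "...")
    print("/* a list of unique database records */", file=fp)

    # Python 2 form, as produced by this patch:
    print "--- Preparing", FILE, "..."
    fp.write("/* a list of unique database records */\n")
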
--- Python-3.1.5/Tools/unicode/makeunicodedata.py 2012-04-10 09:25:37.000000000 +1000
+++ Python-3.1.5/Tools/unicode/makeunicodedata.py 2022-07-13 14:13:37.864821008 +1000
@@ -67,7 +67,7 @@
def maketables(trace=0):
- print("--- Reading", UNICODE_DATA % "", "...")
+ print "--- Reading", UNICODE_DATA % "", "..."
version = ""
unicode = UnicodeData(UNICODE_DATA % version,
@@ -76,15 +76,15 @@
DERIVED_CORE_PROPERTIES % version,
DERIVEDNORMALIZATION_PROPS % version)
- print(len(list(filter(None, unicode.table))), "characters")
+ print len(list(filter(None, unicode.table))), "characters"
for version in old_versions:
- print("--- Reading", UNICODE_DATA % ("-"+version), "...")
+ print "--- Reading", UNICODE_DATA % ("-"+version) + "..."
old_unicode = UnicodeData(UNICODE_DATA % ("-"+version),
COMPOSITION_EXCLUSIONS % ("-"+version),
EASTASIAN_WIDTH % ("-"+version),
DERIVED_CORE_PROPERTIES % ("-"+version))
- print(len(list(filter(None, old_unicode.table))), "characters")
+ print len(list(filter(None, old_unicode.table))), "characters"
merge_old_version(version, unicode, old_unicode)
makeunicodename(unicode, trace)
@@ -103,7 +103,7 @@
FILE = "Modules/unicodedata_db.h"
- print("--- Preparing", FILE, "...")
+ print "--- Preparing", FILE, "..."
# 1) database properties
@@ -214,92 +214,90 @@
l = comp_last[l]
comp_data[f*total_last+l] = char
- print(len(table), "unique properties")
- print(len(decomp_prefix), "unique decomposition prefixes")
- print(len(decomp_data), "unique decomposition entries:", end=' ')
- print(decomp_size, "bytes")
- print(total_first, "first characters in NFC")
- print(total_last, "last characters in NFC")
- print(len(comp_pairs), "NFC pairs")
+ print len(table), "unique properties"
+ print len(decomp_prefix), "unique decomposition prefixes"
+ print len(decomp_data), "unique decomposition entries:",
+ print decomp_size, "bytes"
+ print total_first, "first characters in NFC"
+ print total_last, "last characters in NFC"
+ print len(comp_pairs), "NFC pairs"
- print("--- Writing", FILE, "...")
+ print "--- Writing", FILE, "..."
fp = open(FILE, "w")
- print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
- print(file=fp)
- print('#define UNIDATA_VERSION "%s"' % UNIDATA_VERSION, file=fp)
- print("/* a list of unique database records */", file=fp)
- print("const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {", file=fp)
+ fp.write("/* this file was generated by %s %s */\n\n" % (SCRIPT, VERSION))
+ fp.write('#define UNIDATA_VERSION "%s"\n' % UNIDATA_VERSION)
+ fp.write("/* a list of unique database records */\n")
+ fp.write("const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {\n")
for item in table:
- print(" {%d, %d, %d, %d, %d, %d}," % item, file=fp)
- print("};", file=fp)
- print(file=fp)
-
- print("/* Reindexing of NFC first characters. */", file=fp)
- print("#define TOTAL_FIRST",total_first, file=fp)
- print("#define TOTAL_LAST",total_last, file=fp)
- print("struct reindex{int start;short count,index;};", file=fp)
- print("static struct reindex nfc_first[] = {", file=fp)
+ fp.write(" {%d, %d, %d, %d, %d, %d},\n" % item)
+ fp.write("};\n\n")
+
+ fp.write("/* Reindexing of NFC first characters. */\n")
+ fp.write("#define TOTAL_FIRST %d \n" % total_first)
+ fp.write("#define TOTAL_LAST %d \n" % total_last)
+ fp.write("struct reindex{int start;short count,index;};\n")
+ fp.write("static struct reindex nfc_first[] = {\n")
for start,end in comp_first_ranges:
- print(" { %d, %d, %d}," % (start,end-start,comp_first[start]), file=fp)
- print(" {0,0,0}", file=fp)
- print("};\n", file=fp)
- print("static struct reindex nfc_last[] = {", file=fp)
+ fp.write(" { %d, %d, %d},\n" % (start,end-start,comp_first[start]))
+ fp.write(" {0,0,0}\n")
+ fp.write("};\n")
+ fp.write("static struct reindex nfc_last[] = {\n")
for start,end in comp_last_ranges:
- print(" { %d, %d, %d}," % (start,end-start,comp_last[start]), file=fp)
- print(" {0,0,0}", file=fp)
- print("};\n", file=fp)
+ fp.write(" { %d, %d, %d},\n" % (start,end-start,comp_last[start]))
+ fp.write(" {0,0,0}\n")
+ fp.write("};\n")
# FIXME: <fl> the following tables could be made static, and
# the support code moved into unicodedatabase.c
- print("/* string literals */", file=fp)
- print("const char *_PyUnicode_CategoryNames[] = {", file=fp)
+ fp.write("/* string literals */")
+ fp.write("const char *_PyUnicode_CategoryNames[] = {")
for name in CATEGORY_NAMES:
- print(" \"%s\"," % name, file=fp)
- print(" NULL", file=fp)
- print("};", file=fp)
+ fp.write(" \"%s\",\n" % name)
+ fp.write(" NULL\n")
+ fp.write("};\n")
- print("const char *_PyUnicode_BidirectionalNames[] = {", file=fp)
+ fp.write("const char *_PyUnicode_BidirectionalNames[] = {\n")
for name in BIDIRECTIONAL_NAMES:
- print(" \"%s\"," % name, file=fp)
- print(" NULL", file=fp)
- print("};", file=fp)
+ fp.write(" \"%s\",\n" % name)
+ fp.write(" NULL\n")
+ fp.write("};\n")
- print("const char *_PyUnicode_EastAsianWidthNames[] = {", file=fp)
+ fp.write("const char *_PyUnicode_EastAsianWidthNames[] = {\n")
for name in EASTASIANWIDTH_NAMES:
- print(" \"%s\"," % name, file=fp)
- print(" NULL", file=fp)
- print("};", file=fp)
+ fp.write(" \"%s\",\n" % name)
+ fp.write(" NULL\n")
+ fp.write("};\n")
- print("static const char *decomp_prefix[] = {", file=fp)
+ fp.write("static const char *decomp_prefix[] = {\n")
for name in decomp_prefix:
- print(" \"%s\"," % name, file=fp)
- print(" NULL", file=fp)
- print("};", file=fp)
+ fp.write(" \"%s\",\n" % name)
+ fp.write(" NULL\n")
+ fp.write("};\n")
# split record index table
index1, index2, shift = splitbins(index, trace)
- print("/* index tables for the database records */", file=fp)
- print("#define SHIFT", shift, file=fp)
+ fp.write("/* index tables for the database records */\n")
+ fp.write("#define SHIFT %d\n" % shift)
Array("index1", index1).dump(fp, trace)
Array("index2", index2).dump(fp, trace)
# split decomposition index table
index1, index2, shift = splitbins(decomp_index, trace)
- print("/* decomposition data */", file=fp)
+ fp.write("/* decomposition data */\n")
Array("decomp_data", decomp_data).dump(fp, trace)
- print("/* index tables for the decomposition data */", file=fp)
- print("#define DECOMP_SHIFT", shift, file=fp)
+ fp.write("/* index tables for the decomposition data */\n")
+ fp.write("#define DECOMP_SHIFT %d\n" % shift)
Array("decomp_index1", index1).dump(fp, trace)
Array("decomp_index2", index2).dump(fp, trace)
index, index2, shift = splitbins(comp_data, trace)
- print("/* NFC pairs */", file=fp)
- print("#define COMP_SHIFT", shift, file=fp)
+ fp.write("/* NFC pairs */\n")
+ fp.write("#define COMP_SHIFT %d\n" % shift)
Array("comp_index", index).dump(fp, trace)
Array("comp_data", index2).dump(fp, trace)
@@ -316,30 +314,30 @@
index[i] = cache[record] = len(records)
records.append(record)
index1, index2, shift = splitbins(index, trace)
- print("static const change_record change_records_%s[] = {" % cversion, file=fp)
+ fp.write("static const change_record change_records_%s[] = {\n" % cversion)
for record in records:
- print("\t{ %s }," % ", ".join(map(str,record)), file=fp)
- print("};", file=fp)
- Array("changes_%s_index" % cversion, index1).dump(fp, trace)
- Array("changes_%s_data" % cversion, index2).dump(fp, trace)
- print("static const change_record* get_change_%s(Py_UCS4 n)" % cversion, file=fp)
- print("{", file=fp)
- print("\tint index;", file=fp)
- print("\tif (n >= 0x110000) index = 0;", file=fp)
- print("\telse {", file=fp)
- print("\t\tindex = changes_%s_index[n>>%d];" % (cversion, shift), file=fp)
- print("\t\tindex = changes_%s_data[(index<<%d)+(n & %d)];" % \
- (cversion, shift, ((1<<shift)-1)), file=fp)
- print("\t}", file=fp)
- print("\treturn change_records_%s+index;" % cversion, file=fp)
- print("}\n", file=fp)
- print("static Py_UCS4 normalization_%s(Py_UCS4 n)" % cversion, file=fp)
- print("{", file=fp)
- print("\tswitch(n) {", file=fp)
+ fp.write("\t{ %s },\n" % ", ".join(map(str,record)))
+ fp.write("};\n")
+ Array("changes_%s_index\n" % cversion, index1).dump(fp, trace)
+ Array("changes_%s_data\n" % cversion, index2).dump(fp, trace)
+ fp.write("static const change_record* get_change_%s(Py_UCS4 n)\n" % cversion)
+ fp.write("{\n")
+ fp.write("\tint index;\n")
+ fp.write("\tif (n >= 0x110000) index = 0;\n")
+ fp.write("\telse {\n")
+ fp.write("\t\tindex = changes_%s_index[n>>%d];\n" % (cversion, shift))
+ fp.write("\t\tindex = changes_%s_data[(index<<%d)+(n & %d)];\n" % \
+ (cversion, shift, ((1<<shift)-1)))
+ fp.write("\t}\n")
+ fp.write("\treturn change_records_%s+index;\n" % cversion)
+ fp.write("}\n\n")
+ fp.write("static Py_UCS4 normalization_%s(Py_UCS4 n)\n" % cversion)
+ fp.write("{\n")
+ fp.write("\tswitch(n) {\n")
for k, v in normalization:
- print("\tcase %s: return 0x%s;" % (hex(k), v), file=fp)
- print("\tdefault: return 0;", file=fp)
- print("\t}\n}\n", file=fp)
+ fp.write("\tcase %s: return 0x%s;\n" % (hex(k), v))
+ fp.write("\tdefault: return 0;\n")
+ fp.write("\t}\n}\n\n")
fp.close()
@@ -350,7 +348,7 @@
FILE = "Objects/unicodetype_db.h"
- print("--- Preparing", FILE, "...")
+ print "--- Preparing", FILE, "..."
# extract unicode types
dummy = (0, 0, 0, 0, 0, 0)
@@ -433,25 +431,25 @@
table.append(item)
index[char] = i
- print(len(table), "unique character type entries")
+ print len(table), "unique character type entries"
- print("--- Writing", FILE, "...")
+ print "--- Writing", FILE, "..."
fp = open(FILE, "w")
- print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
- print(file=fp)
- print("/* a list of unique character type descriptors */", file=fp)
- print("const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {", file=fp)
+ fp.write("/* this file was generated by %s %s */\n" % (SCRIPT, VERSION))
+ fp.write("\n")
+ fp.write("/* a list of unique character type descriptors */\n")
+ fp.write("const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {\n")
for item in table:
- print(" {%d, %d, %d, %d, %d, %d}," % item, file=fp)
- print("};", file=fp)
- print(file=fp)
+ fp.write(" {%d, %d, %d, %d, %d, %d},\n" % item)
+ fp.write("};\n")
+ fp.write("\n")
# split decomposition index table
index1, index2, shift = splitbins(index, trace)
- print("/* type indexes */", file=fp)
- print("#define SHIFT", shift, file=fp)
+ fp.write("/* type indexes */\n")
+ fp.write("#define SHIFT %d\n" % shift)
Array("index1", index1).dump(fp, trace)
Array("index2", index2).dump(fp, trace)
@@ -464,7 +462,7 @@
FILE = "Modules/unicodename_db.h"
- print("--- Preparing", FILE, "...")
+ print "--- Preparing", FILE, "..."
# collect names
names = [None] * len(unicode.chars)
@@ -476,7 +474,7 @@
if name and name[0] != "<":
names[char] = name + chr(0)
- print(len(list(n for n in names if n is not None)), "distinct names")
+ print len(list(n for n in names if n is not None)), "distinct names"
# collect unique words from names (note that we differ between
# words inside a sentence, and words ending a sentence. the
@@ -497,7 +495,7 @@
else:
words[w] = [len(words)]
- print(n, "words in text;", b, "bytes")
+ print n, "words in text;", b, "bytes"
wordlist = list(words.items())
@@ -511,19 +509,19 @@
escapes = 0
while escapes * 256 < len(wordlist):
escapes = escapes + 1
- print(escapes, "escapes")
+ print escapes, "escapes"
short = 256 - escapes
assert short > 0
- print(short, "short indexes in lexicon")
+ print short, "short indexes in lexicon"
# statistics
n = 0
for i in range(short):
n = n + len(wordlist[i][1])
- print(n, "short indexes in phrasebook")
+ print n, "short indexes in phrasebook"
# pick the most commonly used words, and sort the rest on falling
# length (to maximize overlap)
@@ -592,29 +590,29 @@
codehash = Hash("code", data, 47)
- print("--- Writing", FILE, "...")
+ print "--- Writing", FILE, "..."
fp = open(FILE, "w")
- print("/* this file was generated by %s %s */" % (SCRIPT, VERSION), file=fp)
- print(file=fp)
- print("#define NAME_MAXLEN", 256, file=fp)
- print(file=fp)
- print("/* lexicon */", file=fp)
+ fp.write("/* this file was generated by %s %s */\n" % (SCRIPT, VERSION))
+ fp.write("\n")
+ fp.write("#define NAME_MAXLEN 256")
+ fp.write("\n")
+ fp.write("/* lexicon */\n")
Array("lexicon", lexicon).dump(fp, trace)
Array("lexicon_offset", lexicon_offset).dump(fp, trace)
# split decomposition index table
offset1, offset2, shift = splitbins(phrasebook_offset, trace)
- print("/* code->name phrasebook */", file=fp)
- print("#define phrasebook_shift", shift, file=fp)
- print("#define phrasebook_short", short, file=fp)
+ fp.write("/* code->name phrasebook */\n")
+ fp.write("#define phrasebook_shift %d\n" % shift)
+ fp.write("#define phrasebook_short %d\n" % short)
Array("phrasebook", phrasebook).dump(fp, trace)
Array("phrasebook_offset1", offset1).dump(fp, trace)
Array("phrasebook_offset2", offset2).dump(fp, trace)
- print("/* name->code dictionary */", file=fp)
+ fp.write("/* name->code dictionary */\n")
codehash.dump(fp, trace)
fp.close()
@@ -868,7 +866,7 @@
else:
raise AssertionError("ran out of polynomials")
- print(size, "slots in hash table")
+ print size, "slots in hash table"
table = [None] * size
@@ -900,7 +898,7 @@
if incr > mask:
incr = incr ^ poly
- print(n, "collisions")
+ print n, "collisions"
self.collisions = n
for i in range(len(table)):
@@ -931,8 +929,6 @@
def dump(self, file, trace=0):
# write data to file, as a C array
size = getsize(self.data)
- if trace:
- print(self.name+":", size*len(self.data), "bytes", file=sys.stderr)
file.write("static ")
if size == 1:
file.write("unsigned char")
@@ -980,12 +976,6 @@
"""
import sys
- if trace:
- def dump(t1, t2, shift, bytes):
- print("%d+%d bins at shift %d; %d bytes" % (
- len(t1), len(t2), shift, bytes), file=sys.stderr)
- print("Size of original table:", len(t)*getsize(t), \
- "bytes", file=sys.stderr)
n = len(t)-1 # last valid index
maxshift = 0 # the most we can shift n and still have something left
if n > 0:
@@ -993,7 +983,7 @@
n >>= 1
maxshift += 1
del n
- bytes = sys.maxsize # smallest total size so far
+ bytes_size = 2**31 - 1 # smallest total size so far
t = tuple(t) # so slices can be dict keys
for shift in range(maxshift + 1):
t1 = []
@@ -1010,15 +1000,10 @@
t1.append(index >> shift)
# determine memory size
b = len(t1)*getsize(t1) + len(t2)*getsize(t2)
- if trace > 1:
- dump(t1, t2, shift, b)
- if b < bytes:
+ if b < bytes_size:
best = t1, t2, shift
- bytes = b
+ bytes_size = b
t1, t2, shift = best
- if trace:
- print("Best:", end=' ', file=sys.stderr)
- dump(t1, t2, shift, bytes)
if __debug__:
# exhaustively verify that the decomposition is correct
mask = ~((~0) << shift) # i.e., low-bit mask of shift bits
--- Python-3.1.5/Lib/token.py 2012-04-10 09:25:36.000000000 +1000
+++ Python-3.1.5/Lib/token.py 2022-07-13 14:13:37.893821468 +1000
@@ -93,11 +93,7 @@
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
- try:
- fp = open(inFileName)
- except IOError as err:
- sys.stdout.write("I/O error: %s\n" % str(err))
- sys.exit(1)
+ fp = open(inFileName)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
@@ -114,7 +110,7 @@
# load the output skeleton from the target:
try:
fp = open(outFileName)
- except IOError as err:
+ except IOError:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
@@ -131,7 +127,7 @@
format[start:end] = lines
try:
fp = open(outFileName, 'w')
- except IOError as err:
+ except IOError:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))