Compare commits


No commits in common. 'c8' and 'c8-beta' have entirely different histories.
c8 ... c8-beta

@@ -1,90 +0,0 @@
From 87acab66e124912549fbc3151f27ca7fae76386c Mon Sep 17 00:00:00 2001
From: Serhiy Storchaka <storchaka@gmail.com>
Date: Tue, 23 Apr 2024 19:54:00 +0200
Subject: [PATCH] gh-115133: Fix tests for XMLPullParser with Expat 2.6.0
Feeding the parser in too-small chunks defers parsing, as a mitigation for
CVE-2023-52425. Future versions of Expat may be more reactive.
(cherry picked from commit 4a08e7b3431cd32a0daf22a33421cd3035343dc4)
---
Lib/test/test_xml_etree.py | 53 +++++++++++--------
...-02-08-14-21-28.gh-issue-115133.ycl4ko.rst | 2 +
2 files changed, 33 insertions(+), 22 deletions(-)
create mode 100644 Misc/NEWS.d/next/Library/2024-02-08-14-21-28.gh-issue-115133.ycl4ko.rst
diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py
index acaa519..c01af47 100644
--- a/Lib/test/test_xml_etree.py
+++ b/Lib/test/test_xml_etree.py
@@ -1044,28 +1044,37 @@ class XMLPullParserTest(unittest.TestCase):
self.assertEqual([(action, elem.tag) for action, elem in events],
expected)
- def test_simple_xml(self):
- for chunk_size in (None, 1, 5):
- with self.subTest(chunk_size=chunk_size):
- parser = ET.XMLPullParser()
- self.assert_event_tags(parser, [])
- self._feed(parser, "<!-- comment -->\n", chunk_size)
- self.assert_event_tags(parser, [])
- self._feed(parser,
- "<root>\n <element key='value'>text</element",
- chunk_size)
- self.assert_event_tags(parser, [])
- self._feed(parser, ">\n", chunk_size)
- self.assert_event_tags(parser, [('end', 'element')])
- self._feed(parser, "<element>text</element>tail\n", chunk_size)
- self._feed(parser, "<empty-element/>\n", chunk_size)
- self.assert_event_tags(parser, [
- ('end', 'element'),
- ('end', 'empty-element'),
- ])
- self._feed(parser, "</root>\n", chunk_size)
- self.assert_event_tags(parser, [('end', 'root')])
- self.assertIsNone(parser.close())
+ def test_simple_xml(self, chunk_size=None):
+ parser = ET.XMLPullParser()
+ self.assert_event_tags(parser, [])
+ self._feed(parser, "<!-- comment -->\n", chunk_size)
+ self.assert_event_tags(parser, [])
+ self._feed(parser,
+ "<root>\n <element key='value'>text</element",
+ chunk_size)
+ self.assert_event_tags(parser, [])
+ self._feed(parser, ">\n", chunk_size)
+ self.assert_event_tags(parser, [('end', 'element')])
+ self._feed(parser, "<element>text</element>tail\n", chunk_size)
+ self._feed(parser, "<empty-element/>\n", chunk_size)
+ self.assert_event_tags(parser, [
+ ('end', 'element'),
+ ('end', 'empty-element'),
+ ])
+ self._feed(parser, "</root>\n", chunk_size)
+ self.assert_event_tags(parser, [('end', 'root')])
+ self.assertIsNone(parser.close())
+
+ @unittest.expectedFailure
+ def test_simple_xml_chunk_1(self):
+ self.test_simple_xml(chunk_size=1)
+
+ @unittest.expectedFailure
+ def test_simple_xml_chunk_5(self):
+ self.test_simple_xml(chunk_size=5)
+
+ def test_simple_xml_chunk_22(self):
+ self.test_simple_xml(chunk_size=22)
def test_feed_while_iterating(self):
parser = ET.XMLPullParser()
diff --git a/Misc/NEWS.d/next/Library/2024-02-08-14-21-28.gh-issue-115133.ycl4ko.rst b/Misc/NEWS.d/next/Library/2024-02-08-14-21-28.gh-issue-115133.ycl4ko.rst
new file mode 100644
index 0000000..6f10152
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-02-08-14-21-28.gh-issue-115133.ycl4ko.rst
@@ -0,0 +1,2 @@
+Fix tests for :class:`~xml.etree.ElementTree.XMLPullParser` with Expat
+2.6.0.
--
2.44.0
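
Illustrative sketch (not part of the patch; chunk sizes chosen for the example): with Expat >= 2.6.0, feeding XMLPullParser very small chunks may defer event delivery, which is what the rewritten test accounts for. Events are not lost, only reported after later feeds or at close().

import xml.etree.ElementTree as ET

def feed_in_chunks(data, chunk_size):
    # Feed the pull parser in fixed-size chunks and print events as they appear.
    parser = ET.XMLPullParser(events=("start", "end"))
    for i in range(0, len(data), chunk_size):
        parser.feed(data[i:i + chunk_size])
        # With very small chunks and Expat >= 2.6.0 this loop body may not run
        # for several feed() calls; the deferred events show up later.
        for event, elem in parser.read_events():
            print(chunk_size, event, elem.tag)
    parser.close()
    for event, elem in parser.read_events():
        print(chunk_size, event, elem.tag, "(reported after close)")

for size in (1, 5, 22):
    feed_in_chunks("<root><element key='value'>text</element></root>", size)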

@@ -1,291 +0,0 @@
From 82f1ea4b72be40f58fd0a9a37f8d8d2f7d16f9e0 Mon Sep 17 00:00:00 2001
From: Lumir Balhar <lbalhar@redhat.com>
Date: Wed, 24 Apr 2024 00:19:23 +0200
Subject: [PATCH] CVE-2023-6597
Co-authored-by: Søren Løvborg <sorenl@unity3d.com>
Co-authored-by: Serhiy Storchaka <storchaka@gmail.com>
---
Lib/tempfile.py | 44 +++++++++-
Lib/test/test_tempfile.py | 166 +++++++++++++++++++++++++++++++++++---
2 files changed, 199 insertions(+), 11 deletions(-)
diff --git a/Lib/tempfile.py b/Lib/tempfile.py
index 2cb5434..d79b70c 100644
--- a/Lib/tempfile.py
+++ b/Lib/tempfile.py
@@ -276,6 +276,23 @@ def _mkstemp_inner(dir, pre, suf, flags, output_type):
"No usable temporary file name found")
+def _dont_follow_symlinks(func, path, *args):
+ # Pass follow_symlinks=False, unless not supported on this platform.
+ if func in _os.supports_follow_symlinks:
+ func(path, *args, follow_symlinks=False)
+ elif _os.name == 'nt' or not _os.path.islink(path):
+ func(path, *args)
+
+
+def _resetperms(path):
+ try:
+ chflags = _os.chflags
+ except AttributeError:
+ pass
+ else:
+ _dont_follow_symlinks(chflags, path, 0)
+ _dont_follow_symlinks(_os.chmod, path, 0o700)
+
# User visible interfaces.
def gettempprefix():
@@ -794,9 +811,32 @@ class TemporaryDirectory(object):
self, self._cleanup, self.name,
warn_message="Implicitly cleaning up {!r}".format(self))
+ @classmethod
+ def _rmtree(cls, name):
+ def onerror(func, path, exc_info):
+ if issubclass(exc_info[0], PermissionError):
+ try:
+ if path != name:
+ _resetperms(_os.path.dirname(path))
+ _resetperms(path)
+
+ try:
+ _os.unlink(path)
+ # PermissionError is raised on FreeBSD for directories
+ except (IsADirectoryError, PermissionError):
+ cls._rmtree(path)
+ except FileNotFoundError:
+ pass
+ elif issubclass(exc_info[0], FileNotFoundError):
+ pass
+ else:
+ raise
+
+ _shutil.rmtree(name, onerror=onerror)
+
@classmethod
def _cleanup(cls, name, warn_message):
- _shutil.rmtree(name)
+ cls._rmtree(name)
_warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
@@ -810,4 +850,4 @@ class TemporaryDirectory(object):
def cleanup(self):
if self._finalizer.detach():
- _shutil.rmtree(self.name)
+ self._rmtree(self.name)
diff --git a/Lib/test/test_tempfile.py b/Lib/test/test_tempfile.py
index 710756b..c5560e1 100644
--- a/Lib/test/test_tempfile.py
+++ b/Lib/test/test_tempfile.py
@@ -1298,19 +1298,25 @@ class NulledModules:
class TestTemporaryDirectory(BaseTestCase):
"""Test TemporaryDirectory()."""
- def do_create(self, dir=None, pre="", suf="", recurse=1):
+ def do_create(self, dir=None, pre="", suf="", recurse=1, dirs=1, files=1):
if dir is None:
dir = tempfile.gettempdir()
tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
self.nameCheck(tmp.name, dir, pre, suf)
- # Create a subdirectory and some files
- if recurse:
- d1 = self.do_create(tmp.name, pre, suf, recurse-1)
- d1.name = None
- with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
- f.write(b"Hello world!")
+ self.do_create2(tmp.name, recurse, dirs, files)
return tmp
+ def do_create2(self, path, recurse=1, dirs=1, files=1):
+ # Create subdirectories and some files
+ if recurse:
+ for i in range(dirs):
+ name = os.path.join(path, "dir%d" % i)
+ os.mkdir(name)
+ self.do_create2(name, recurse-1, dirs, files)
+ for i in range(files):
+ with open(os.path.join(path, "test%d.txt" % i), "wb") as f:
+ f.write(b"Hello world!")
+
def test_mkdtemp_failure(self):
# Check no additional exception if mkdtemp fails
# Previously would raise AttributeError instead
@@ -1350,11 +1356,108 @@ class TestTemporaryDirectory(BaseTestCase):
"TemporaryDirectory %s exists after cleanup" % d1.name)
self.assertTrue(os.path.exists(d2.name),
"Directory pointed to by a symlink was deleted")
- self.assertEqual(os.listdir(d2.name), ['test.txt'],
+ self.assertEqual(os.listdir(d2.name), ['test0.txt'],
"Contents of the directory pointed to by a symlink "
"were deleted")
d2.cleanup()
+ @support.skip_unless_symlink
+ def test_cleanup_with_symlink_modes(self):
+ # cleanup() should not follow symlinks when fixing mode bits (#91133)
+ with self.do_create(recurse=0) as d2:
+ file1 = os.path.join(d2, 'file1')
+ open(file1, 'wb').close()
+ dir1 = os.path.join(d2, 'dir1')
+ os.mkdir(dir1)
+ for mode in range(8):
+ mode <<= 6
+ with self.subTest(mode=format(mode, '03o')):
+ def test(target, target_is_directory):
+ d1 = self.do_create(recurse=0)
+ symlink = os.path.join(d1.name, 'symlink')
+ os.symlink(target, symlink,
+ target_is_directory=target_is_directory)
+ try:
+ os.chmod(symlink, mode, follow_symlinks=False)
+ except NotImplementedError:
+ pass
+ try:
+ os.chmod(symlink, mode)
+ except FileNotFoundError:
+ pass
+ os.chmod(d1.name, mode)
+ d1.cleanup()
+ self.assertFalse(os.path.exists(d1.name))
+
+ with self.subTest('nonexisting file'):
+ test('nonexisting', target_is_directory=False)
+ with self.subTest('nonexisting dir'):
+ test('nonexisting', target_is_directory=True)
+
+ with self.subTest('existing file'):
+ os.chmod(file1, mode)
+ old_mode = os.stat(file1).st_mode
+ test(file1, target_is_directory=False)
+ new_mode = os.stat(file1).st_mode
+ self.assertEqual(new_mode, old_mode,
+ '%03o != %03o' % (new_mode, old_mode))
+
+ with self.subTest('existing dir'):
+ os.chmod(dir1, mode)
+ old_mode = os.stat(dir1).st_mode
+ test(dir1, target_is_directory=True)
+ new_mode = os.stat(dir1).st_mode
+ self.assertEqual(new_mode, old_mode,
+ '%03o != %03o' % (new_mode, old_mode))
+
+ @unittest.skipUnless(hasattr(os, 'chflags'), 'requires os.chflags')
+ @support.skip_unless_symlink
+ def test_cleanup_with_symlink_flags(self):
+ # cleanup() should not follow symlinks when fixing flags (#91133)
+ flags = stat.UF_IMMUTABLE | stat.UF_NOUNLINK
+ self.check_flags(flags)
+
+ with self.do_create(recurse=0) as d2:
+ file1 = os.path.join(d2, 'file1')
+ open(file1, 'wb').close()
+ dir1 = os.path.join(d2, 'dir1')
+ os.mkdir(dir1)
+ def test(target, target_is_directory):
+ d1 = self.do_create(recurse=0)
+ symlink = os.path.join(d1.name, 'symlink')
+ os.symlink(target, symlink,
+ target_is_directory=target_is_directory)
+ try:
+ os.chflags(symlink, flags, follow_symlinks=False)
+ except NotImplementedError:
+ pass
+ try:
+ os.chflags(symlink, flags)
+ except FileNotFoundError:
+ pass
+ os.chflags(d1.name, flags)
+ d1.cleanup()
+ self.assertFalse(os.path.exists(d1.name))
+
+ with self.subTest('nonexisting file'):
+ test('nonexisting', target_is_directory=False)
+ with self.subTest('nonexisting dir'):
+ test('nonexisting', target_is_directory=True)
+
+ with self.subTest('existing file'):
+ os.chflags(file1, flags)
+ old_flags = os.stat(file1).st_flags
+ test(file1, target_is_directory=False)
+ new_flags = os.stat(file1).st_flags
+ self.assertEqual(new_flags, old_flags)
+
+ with self.subTest('existing dir'):
+ os.chflags(dir1, flags)
+ old_flags = os.stat(dir1).st_flags
+ test(dir1, target_is_directory=True)
+ new_flags = os.stat(dir1).st_flags
+ self.assertEqual(new_flags, old_flags)
+
@support.cpython_only
def test_del_on_collection(self):
# A TemporaryDirectory is deleted when garbage collected
@@ -1385,7 +1488,7 @@ class TestTemporaryDirectory(BaseTestCase):
tmp2 = os.path.join(tmp.name, 'test_dir')
os.mkdir(tmp2)
- with open(os.path.join(tmp2, "test.txt"), "w") as f:
+ with open(os.path.join(tmp2, "test0.txt"), "w") as f:
f.write("Hello world!")
{mod}.tmp = tmp
@@ -1453,6 +1556,51 @@ class TestTemporaryDirectory(BaseTestCase):
self.assertEqual(name, d.name)
self.assertFalse(os.path.exists(name))
+ def test_modes(self):
+ for mode in range(8):
+ mode <<= 6
+ with self.subTest(mode=format(mode, '03o')):
+ d = self.do_create(recurse=3, dirs=2, files=2)
+ with d:
+ # Change files and directories mode recursively.
+ for root, dirs, files in os.walk(d.name, topdown=False):
+ for name in files:
+ os.chmod(os.path.join(root, name), mode)
+ os.chmod(root, mode)
+ d.cleanup()
+ self.assertFalse(os.path.exists(d.name))
+
+ def check_flags(self, flags):
+ # skip the test if these flags are not supported (ex: FreeBSD 13)
+ filename = support.TESTFN
+ try:
+ open(filename, "w").close()
+ try:
+ os.chflags(filename, flags)
+ except OSError as exc:
+ # "OSError: [Errno 45] Operation not supported"
+ self.skipTest(f"chflags() doesn't support flags "
+ f"{flags:#b}: {exc}")
+ else:
+ os.chflags(filename, 0)
+ finally:
+ support.unlink(filename)
+
+ @unittest.skipUnless(hasattr(os, 'chflags'), 'requires os.lchflags')
+ def test_flags(self):
+ flags = stat.UF_IMMUTABLE | stat.UF_NOUNLINK
+ self.check_flags(flags)
+
+ d = self.do_create(recurse=3, dirs=2, files=2)
+ with d:
+ # Change files and directories flags recursively.
+ for root, dirs, files in os.walk(d.name, topdown=False):
+ for name in files:
+ os.chflags(os.path.join(root, name), flags)
+ os.chflags(root, flags)
+ d.cleanup()
+ self.assertFalse(os.path.exists(d.name))
+
if __name__ == "__main__":
unittest.main()
--
2.44.0
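
Illustrative sketch (not part of the patch) of the behavior the new TemporaryDirectory._rmtree() is meant to provide on POSIX for an unprivileged user: a subdirectory whose permissions were dropped no longer blocks cleanup(), because the onerror hook resets the mode (without following symlinks) and retries the deletion.

import os
import tempfile

d = tempfile.TemporaryDirectory()
locked = os.path.join(d.name, "locked")
os.mkdir(locked)
with open(os.path.join(locked, "data.txt"), "w") as f:
    f.write("hello")
os.chmod(locked, 0)  # drop all permissions on the subdirectory

# With the patched tempfile this succeeds; previously shutil.rmtree() raised
# PermissionError because the unreadable directory could not be listed.
d.cleanup()
print(os.path.exists(d.name))  # False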

@@ -1,346 +0,0 @@
From 066df4fd454d6ff9be66e80b2a65995b10af174f Mon Sep 17 00:00:00 2001
From: John Jolly <john.jolly@gmail.com>
Date: Tue, 30 Jan 2018 01:51:35 -0700
Subject: [PATCH] bpo-22908: Add seek and tell functionality to ZipExtFile
(GH-4966)
This allows for nested zip files, tar files within zip files, zip files within tar files, etc.
Contributed by: John Jolly
---
Doc/library/zipfile.rst | 6 +-
Lib/test/test_zipfile.py | 34 ++++++++
Lib/zipfile.py | 82 +++++++++++++++++++
.../2017-12-21-22-00-11.bpo-22908.cVm89I.rst | 2 +
4 files changed, 121 insertions(+), 3 deletions(-)
create mode 100644 Misc/NEWS.d/next/Library/2017-12-21-22-00-11.bpo-22908.cVm89I.rst
diff --git a/Doc/library/zipfile.rst b/Doc/library/zipfile.rst
index d58efe0b417516..7c9a8c80225491 100644
--- a/Doc/library/zipfile.rst
+++ b/Doc/library/zipfile.rst
@@ -246,9 +246,9 @@ ZipFile Objects
With *mode* ``'r'`` the file-like object
(``ZipExtFile``) is read-only and provides the following methods:
:meth:`~io.BufferedIOBase.read`, :meth:`~io.IOBase.readline`,
- :meth:`~io.IOBase.readlines`, :meth:`__iter__`,
- :meth:`~iterator.__next__`. These objects can operate independently of
- the ZipFile.
+ :meth:`~io.IOBase.readlines`, :meth:`~io.IOBase.seek`,
+ :meth:`~io.IOBase.tell`, :meth:`__iter__`, :meth:`~iterator.__next__`.
+ These objects can operate independently of the ZipFile.
With ``mode='w'``, a writable file handle is returned, which supports the
:meth:`~io.BufferedIOBase.write` method. While a writable file handle is open,
diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py
index 94db858a1517c4..61c3e349a69ef4 100644
--- a/Lib/test/test_zipfile.py
+++ b/Lib/test/test_zipfile.py
@@ -1628,6 +1628,40 @@ def test_open_conflicting_handles(self):
self.assertEqual(zipf.read('baz'), msg3)
self.assertEqual(zipf.namelist(), ['foo', 'bar', 'baz'])
+ def test_seek_tell(self):
+ # Test seek functionality
+ txt = b"Where's Bruce?"
+ bloc = txt.find(b"Bruce")
+ # Check seek on a file
+ with zipfile.ZipFile(TESTFN, "w") as zipf:
+ zipf.writestr("foo.txt", txt)
+ with zipfile.ZipFile(TESTFN, "r") as zipf:
+ with zipf.open("foo.txt", "r") as fp:
+ fp.seek(bloc, os.SEEK_SET)
+ self.assertEqual(fp.tell(), bloc)
+ fp.seek(-bloc, os.SEEK_CUR)
+ self.assertEqual(fp.tell(), 0)
+ fp.seek(bloc, os.SEEK_CUR)
+ self.assertEqual(fp.tell(), bloc)
+ self.assertEqual(fp.read(5), txt[bloc:bloc+5])
+ fp.seek(0, os.SEEK_END)
+ self.assertEqual(fp.tell(), len(txt))
+ # Check seek on memory file
+ data = io.BytesIO()
+ with zipfile.ZipFile(data, mode="w") as zipf:
+ zipf.writestr("foo.txt", txt)
+ with zipfile.ZipFile(data, mode="r") as zipf:
+ with zipf.open("foo.txt", "r") as fp:
+ fp.seek(bloc, os.SEEK_SET)
+ self.assertEqual(fp.tell(), bloc)
+ fp.seek(-bloc, os.SEEK_CUR)
+ self.assertEqual(fp.tell(), 0)
+ fp.seek(bloc, os.SEEK_CUR)
+ self.assertEqual(fp.tell(), bloc)
+ self.assertEqual(fp.read(5), txt[bloc:bloc+5])
+ fp.seek(0, os.SEEK_END)
+ self.assertEqual(fp.tell(), len(txt))
+
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
diff --git a/Lib/zipfile.py b/Lib/zipfile.py
index f9db45f58a2bde..5df7b1bf75b9d9 100644
--- a/Lib/zipfile.py
+++ b/Lib/zipfile.py
@@ -696,6 +696,18 @@ def __init__(self, file, pos, close, lock, writing):
self._close = close
self._lock = lock
self._writing = writing
+ self.seekable = file.seekable
+ self.tell = file.tell
+
+ def seek(self, offset, whence=0):
+ with self._lock:
+ if self.writing():
+ raise ValueError("Can't reposition in the ZIP file while "
+ "there is an open writing handle on it. "
+ "Close the writing handle before trying to read.")
+ self._file.seek(self._pos)
+ self._pos = self._file.tell()
+ return self._pos
def read(self, n=-1):
with self._lock:
@@ -746,6 +758,9 @@ class ZipExtFile(io.BufferedIOBase):
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
+ # Chunk size to read during seek
+ MAX_SEEK_READ = 1 << 24
+
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
@@ -778,6 +793,17 @@ def __init__(self, fileobj, mode, zipinfo, decrypter=None,
else:
self._expected_crc = None
+ self._seekable = False
+ try:
+ if fileobj.seekable():
+ self._orig_compress_start = fileobj.tell()
+ self._orig_compress_size = zipinfo.compress_size
+ self._orig_file_size = zipinfo.file_size
+ self._orig_start_crc = self._running_crc
+ self._seekable = True
+ except AttributeError:
+ pass
+
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
@@ -963,6 +989,62 @@ def close(self):
finally:
super().close()
+ def seekable(self):
+ return self._seekable
+
+ def seek(self, offset, whence=0):
+ if not self._seekable:
+ raise io.UnsupportedOperation("underlying stream is not seekable")
+ curr_pos = self.tell()
+ if whence == 0: # Seek from start of file
+ new_pos = offset
+ elif whence == 1: # Seek from current position
+ new_pos = curr_pos + offset
+ elif whence == 2: # Seek from EOF
+ new_pos = self._orig_file_size + offset
+ else:
+ raise ValueError("whence must be os.SEEK_SET (0), "
+ "os.SEEK_CUR (1), or os.SEEK_END (2)")
+
+ if new_pos > self._orig_file_size:
+ new_pos = self._orig_file_size
+
+ if new_pos < 0:
+ new_pos = 0
+
+ read_offset = new_pos - curr_pos
+ buff_offset = read_offset + self._offset
+
+ if buff_offset >= 0 and buff_offset < len(self._readbuffer):
+ # Just move the _offset index if the new position is in the _readbuffer
+ self._offset = buff_offset
+ read_offset = 0
+ elif read_offset < 0:
+ # Position is before the current position. Reset the ZipExtFile
+
+ self._fileobj.seek(self._orig_compress_start)
+ self._running_crc = self._orig_start_crc
+ self._compress_left = self._orig_compress_size
+ self._left = self._orig_file_size
+ self._readbuffer = b''
+ self._offset = 0
+ self._decompressor = zipfile._get_decompressor(self._compress_type)
+ self._eof = False
+ read_offset = new_pos
+
+ while read_offset > 0:
+ read_len = min(self.MAX_SEEK_READ, read_offset)
+ self.read(read_len)
+ read_offset -= read_len
+
+ return self.tell()
+
+ def tell(self):
+ if not self._seekable:
+ raise io.UnsupportedOperation("underlying stream is not seekable")
+ filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
+ return filepos
+
class _ZipWriteFile(io.BufferedIOBase):
def __init__(self, zf, zinfo, zip64):
diff --git a/Misc/NEWS.d/next/Library/2017-12-21-22-00-11.bpo-22908.cVm89I.rst b/Misc/NEWS.d/next/Library/2017-12-21-22-00-11.bpo-22908.cVm89I.rst
new file mode 100644
index 00000000000000..4f3cc0166019f1
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2017-12-21-22-00-11.bpo-22908.cVm89I.rst
@@ -0,0 +1,2 @@
+Added seek and tell to the ZipExtFile class. This only works if the file
+object used to open the zipfile is seekable.
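
Illustrative usage sketch (not part of the patch) of what a seekable ZipExtFile enables, per the commit message's nested-archive use case: a tar archive stored inside a zip can be read in place, since tarfile needs seek() and tell() on its file object.

import io
import tarfile
import zipfile

# Build a zip archive that contains a small tar archive, entirely in memory.
inner = io.BytesIO()
with tarfile.open(fileobj=inner, mode="w") as tf:
    payload = b"hello from the nested tar"
    info = tarfile.TarInfo("greeting.txt")
    info.size = len(payload)
    tf.addfile(info, io.BytesIO(payload))

outer = io.BytesIO()
with zipfile.ZipFile(outer, "w") as zf:
    zf.writestr("inner.tar", inner.getvalue())

# Read the nested tar without extracting it: zf.open() returns a ZipExtFile,
# and tarfile relies on its new seek()/tell() support.
with zipfile.ZipFile(outer) as zf:
    with zf.open("inner.tar") as member:
        with tarfile.open(fileobj=member) as tf:
            print(tf.getnames())  # ['greeting.txt']
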
From 55beb125db2942b5362454e05542e9661e964a65 Mon Sep 17 00:00:00 2001
From: Serhiy Storchaka <storchaka@gmail.com>
Date: Tue, 23 Apr 2024 14:29:31 +0200
Subject: [PATCH] gh-109858: Protect zipfile from "quoted-overlap" zipbomb
(GH-110016) (GH-113916)
Raise BadZipFile when trying to read an entry that overlaps with another
entry or the central directory.
(cherry picked from commit 66363b9a7b9fe7c99eba3a185b74c5fdbf842eba)
---
Lib/test/test_zipfile.py | 60 +++++++++++++++++++
Lib/zipfile.py | 12 ++++
...-09-28-13-15-51.gh-issue-109858.43e2dg.rst | 3 +
3 files changed, 75 insertions(+)
create mode 100644 Misc/NEWS.d/next/Library/2023-09-28-13-15-51.gh-issue-109858.43e2dg.rst
diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py
index 7f82586..0379909 100644
--- a/Lib/test/test_zipfile.py
+++ b/Lib/test/test_zipfile.py
@@ -1644,6 +1644,66 @@ class OtherTests(unittest.TestCase):
fp.seek(0, os.SEEK_END)
self.assertEqual(fp.tell(), len(txt))
+ @requires_zlib
+ def test_full_overlap(self):
+ data = (
+ b'PK\x03\x04\x14\x00\x00\x00\x08\x00\xa0lH\x05\xe2\x1e'
+ b'8\xbb\x10\x00\x00\x00\t\x04\x00\x00\x01\x00\x00\x00a\xed'
+ b'\xc0\x81\x08\x00\x00\x00\xc00\xd6\xfbK\\d\x0b`P'
+ b'K\x01\x02\x14\x00\x14\x00\x00\x00\x08\x00\xa0lH\x05\xe2'
+ b'\x1e8\xbb\x10\x00\x00\x00\t\x04\x00\x00\x01\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00aPK'
+ b'\x01\x02\x14\x00\x14\x00\x00\x00\x08\x00\xa0lH\x05\xe2\x1e'
+ b'8\xbb\x10\x00\x00\x00\t\x04\x00\x00\x01\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00bPK\x05'
+ b'\x06\x00\x00\x00\x00\x02\x00\x02\x00^\x00\x00\x00/\x00\x00'
+ b'\x00\x00\x00'
+ )
+ with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
+ self.assertEqual(zipf.namelist(), ['a', 'b'])
+ zi = zipf.getinfo('a')
+ self.assertEqual(zi.header_offset, 0)
+ self.assertEqual(zi.compress_size, 16)
+ self.assertEqual(zi.file_size, 1033)
+ zi = zipf.getinfo('b')
+ self.assertEqual(zi.header_offset, 0)
+ self.assertEqual(zi.compress_size, 16)
+ self.assertEqual(zi.file_size, 1033)
+ self.assertEqual(len(zipf.read('a')), 1033)
+ with self.assertRaisesRegex(zipfile.BadZipFile, 'File name.*differ'):
+ zipf.read('b')
+
+ @requires_zlib
+ def test_quoted_overlap(self):
+ data = (
+ b'PK\x03\x04\x14\x00\x00\x00\x08\x00\xa0lH\x05Y\xfc'
+ b'8\x044\x00\x00\x00(\x04\x00\x00\x01\x00\x00\x00a\x00'
+ b'\x1f\x00\xe0\xffPK\x03\x04\x14\x00\x00\x00\x08\x00\xa0l'
+ b'H\x05\xe2\x1e8\xbb\x10\x00\x00\x00\t\x04\x00\x00\x01\x00'
+ b'\x00\x00b\xed\xc0\x81\x08\x00\x00\x00\xc00\xd6\xfbK\\'
+ b'd\x0b`PK\x01\x02\x14\x00\x14\x00\x00\x00\x08\x00\xa0'
+ b'lH\x05Y\xfc8\x044\x00\x00\x00(\x04\x00\x00\x01'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00aPK\x01\x02\x14\x00\x14\x00\x00\x00\x08\x00\xa0l'
+ b'H\x05\xe2\x1e8\xbb\x10\x00\x00\x00\t\x04\x00\x00\x01\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$\x00\x00\x00'
+ b'bPK\x05\x06\x00\x00\x00\x00\x02\x00\x02\x00^\x00\x00'
+ b'\x00S\x00\x00\x00\x00\x00'
+ )
+ with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
+ self.assertEqual(zipf.namelist(), ['a', 'b'])
+ zi = zipf.getinfo('a')
+ self.assertEqual(zi.header_offset, 0)
+ self.assertEqual(zi.compress_size, 52)
+ self.assertEqual(zi.file_size, 1064)
+ zi = zipf.getinfo('b')
+ self.assertEqual(zi.header_offset, 36)
+ self.assertEqual(zi.compress_size, 16)
+ self.assertEqual(zi.file_size, 1033)
+ with self.assertRaisesRegex(zipfile.BadZipFile, 'Overlapped entries'):
+ zipf.read('a')
+ self.assertEqual(len(zipf.read('b')), 1033)
+
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
diff --git a/Lib/zipfile.py b/Lib/zipfile.py
index 0ab9fac..e6d7676 100644
--- a/Lib/zipfile.py
+++ b/Lib/zipfile.py
@@ -338,6 +338,7 @@ class ZipInfo (object):
'compress_size',
'file_size',
'_raw_time',
+ '_end_offset',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
@@ -376,6 +377,7 @@ class ZipInfo (object):
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
+ self._end_offset = None # Start of the next local header or central directory
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
@@ -1346,6 +1348,12 @@ class ZipFile:
if self.debug > 2:
print("total", total)
+ end_offset = self.start_dir
+ for zinfo in sorted(self.filelist,
+ key=lambda zinfo: zinfo.header_offset,
+ reverse=True):
+ zinfo._end_offset = end_offset
+ end_offset = zinfo.header_offset
def namelist(self):
"""Return a list of file names in the archive."""
@@ -1500,6 +1508,10 @@ class ZipFile:
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
+ if (zinfo._end_offset is not None and
+ zef_file.tell() + zinfo.compress_size > zinfo._end_offset):
+ raise BadZipFile(f"Overlapped entries: {zinfo.orig_filename!r} (possible zip bomb)")
+
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
diff --git a/Misc/NEWS.d/next/Library/2023-09-28-13-15-51.gh-issue-109858.43e2dg.rst b/Misc/NEWS.d/next/Library/2023-09-28-13-15-51.gh-issue-109858.43e2dg.rst
new file mode 100644
index 0000000..be279ca
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-28-13-15-51.gh-issue-109858.43e2dg.rst
@@ -0,0 +1,3 @@
+Protect :mod:`zipfile` from "quoted-overlap" zipbomb. It now raises
+BadZipFile when try to read an entry that overlaps with other entry or
+central directory.
--
2.44.0
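
Illustrative sketch (not part of either patch above): with the gh-109858 change, reading an entry whose data overlaps another entry or the central directory raises zipfile.BadZipFile, so code that extracts untrusted archives can fail closed instead of inflating a "quoted-overlap" zip bomb.

import zipfile

def read_members(path):
    # Read every member, treating overlapping entries as a fatal error.
    with zipfile.ZipFile(path) as zf:
        for name in zf.namelist():
            try:
                data = zf.read(name)
            except zipfile.BadZipFile as exc:
                # e.g. "Overlapped entries: 'a' (possible zip bomb)"
                print("rejecting %r: %s" % (name, exc))
                raise
            print(name, len(data), "bytes")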

@@ -1,356 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Petr Viktorin <encukou@gmail.com>
Date: Tue, 7 May 2024 11:58:20 +0200
Subject: [PATCH] 00431: CVE-2024-4032: incorrect IPv4 and IPv6 private ranges
Upstream issue: https://github.com/python/cpython/issues/113171
Backported from 3.8.
---
Doc/library/ipaddress.rst | 43 ++++++++-
Doc/tools/susp-ignored.csv | 8 ++
Lib/ipaddress.py | 95 +++++++++++++++----
Lib/test/test_ipaddress.py | 52 ++++++++++
...-03-14-01-38-44.gh-issue-113171.VFnObz.rst | 9 ++
5 files changed, 186 insertions(+), 21 deletions(-)
create mode 100644 Misc/NEWS.d/next/Library/2024-03-14-01-38-44.gh-issue-113171.VFnObz.rst
diff --git a/Doc/library/ipaddress.rst b/Doc/library/ipaddress.rst
index 4ce1ed1ced..18613babc9 100644
--- a/Doc/library/ipaddress.rst
+++ b/Doc/library/ipaddress.rst
@@ -166,18 +166,53 @@ write code that handles both IP versions correctly. Address objects are
.. attribute:: is_private
- ``True`` if the address is allocated for private networks. See
+ ``True`` if the address is defined as not globally reachable by
iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
- (for IPv6).
+ (for IPv6) with the following exceptions:
+
+ * ``is_private`` is ``False`` for the shared address space (``100.64.0.0/10``)
+ * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+
+ address.is_private == address.ipv4_mapped.is_private
+
+ ``is_private`` has value opposite to :attr:`is_global`, except for the shared address space
+ (``100.64.0.0/10`` range) where they are both ``False``.
+
+ .. versionchanged:: 3.8.20
+
+ Fixed some false positives and false negatives.
+
+ * ``192.0.0.0/24`` is considered private with the exception of ``192.0.0.9/32`` and
+ ``192.0.0.10/32`` (previously: only the ``192.0.0.0/29`` sub-range was considered private).
+ * ``64:ff9b:1::/48`` is considered private.
+ * ``2002::/16`` is considered private.
+ * There are exceptions within ``2001::/23`` (otherwise considered private): ``2001:1::1/128``,
+ ``2001:1::2/128``, ``2001:3::/32``, ``2001:4:112::/48``, ``2001:20::/28``, ``2001:30::/28``.
+ The exceptions are not considered private.
.. attribute:: is_global
- ``True`` if the address is allocated for public networks. See
+ ``True`` if the address is defined as globally reachable by
iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
- (for IPv6).
+ (for IPv6) with the following exception:
+
+ For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+
+ address.is_global == address.ipv4_mapped.is_global
+
+ ``is_global`` has value opposite to :attr:`is_private`, except for the shared address space
+ (``100.64.0.0/10`` range) where they are both ``False``.
.. versionadded:: 3.4
+ .. versionchanged:: 3.8.20
+
+ Fixed some false positives and false negatives, see :attr:`is_private` for details.
+
.. attribute:: is_unspecified
``True`` if the address is unspecified. See :RFC:`5735` (for IPv4)
diff --git a/Doc/tools/susp-ignored.csv b/Doc/tools/susp-ignored.csv
index ed434ce77d..6bc0741b12 100644
--- a/Doc/tools/susp-ignored.csv
+++ b/Doc/tools/susp-ignored.csv
@@ -160,6 +160,14 @@ library/ipaddress,,:db00,2001:db00::0/24
library/ipaddress,,::,2001:db00::0/24
library/ipaddress,,:db00,2001:db00::0/ffff:ff00::
library/ipaddress,,::,2001:db00::0/ffff:ff00::
+library/ipaddress,,:ff9b,64:ff9b:1::/48
+library/ipaddress,,::,64:ff9b:1::/48
+library/ipaddress,,::,2001::
+library/ipaddress,,::,2001:1::
+library/ipaddress,,::,2001:3::
+library/ipaddress,,::,2001:4:112::
+library/ipaddress,,::,2001:20::
+library/ipaddress,,::,2001:30::
library/itertools,,:step,elements from seq[start:stop:step]
library/itertools,,:stop,elements from seq[start:stop:step]
library/logging.handlers,,:port,host:port
diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py
index 98492136ca..55d4d62d70 100644
--- a/Lib/ipaddress.py
+++ b/Lib/ipaddress.py
@@ -1302,18 +1302,41 @@ class IPv4Address(_BaseV4, _BaseAddress):
@property
@functools.lru_cache()
def is_private(self):
- """Test if this address is allocated for private networks.
+ """``True`` if the address is defined as not globally reachable by
+ iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
+ (for IPv6) with the following exceptions:
- Returns:
- A boolean, True if the address is reserved per
- iana-ipv4-special-registry.
+ * ``is_private`` is ``False`` for ``100.64.0.0/10``
+ * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+ address.is_private == address.ipv4_mapped.is_private
+
+ ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10``
+ IPv4 range where they are both ``False``.
"""
- return any(self in net for net in self._constants._private_networks)
+ return (
+ any(self in net for net in self._constants._private_networks)
+ and all(self not in net for net in self._constants._private_networks_exceptions)
+ )
@property
@functools.lru_cache()
def is_global(self):
+ """``True`` if the address is defined as globally reachable by
+ iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
+ (for IPv6) with the following exception:
+
+ For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+
+ address.is_global == address.ipv4_mapped.is_global
+
+ ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10``
+ IPv4 range where they are both ``False``.
+ """
return self not in self._constants._public_network and not self.is_private
@property
@@ -1548,13 +1571,15 @@ class _IPv4Constants:
_public_network = IPv4Network('100.64.0.0/10')
+ # Not globally reachable address blocks listed on
+ # https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
- IPv4Network('192.0.0.0/29'),
+ IPv4Network('192.0.0.0/24'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
@@ -1565,6 +1590,11 @@ class _IPv4Constants:
IPv4Network('255.255.255.255/32'),
]
+ _private_networks_exceptions = [
+ IPv4Network('192.0.0.9/32'),
+ IPv4Network('192.0.0.10/32'),
+ ]
+
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
@@ -1953,23 +1983,42 @@ class IPv6Address(_BaseV6, _BaseAddress):
@property
@functools.lru_cache()
def is_private(self):
- """Test if this address is allocated for private networks.
+ """``True`` if the address is defined as not globally reachable by
+ iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
+ (for IPv6) with the following exceptions:
- Returns:
- A boolean, True if the address is reserved per
- iana-ipv6-special-registry.
+ * ``is_private`` is ``False`` for ``100.64.0.0/10``
+ * For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+ address.is_private == address.ipv4_mapped.is_private
+
+ ``is_private`` has value opposite to :attr:`is_global`, except for the ``100.64.0.0/10``
+ IPv4 range where they are both ``False``.
"""
- return any(self in net for net in self._constants._private_networks)
+ ipv4_mapped = self.ipv4_mapped
+ if ipv4_mapped is not None:
+ return ipv4_mapped.is_private
+ return (
+ any(self in net for net in self._constants._private_networks)
+ and all(self not in net for net in self._constants._private_networks_exceptions)
+ )
@property
def is_global(self):
- """Test if this address is allocated for public networks.
+ """``True`` if the address is defined as globally reachable by
+ iana-ipv4-special-registry_ (for IPv4) or iana-ipv6-special-registry_
+ (for IPv6) with the following exception:
- Returns:
- A boolean, true if the address is not reserved per
- iana-ipv6-special-registry.
+ For IPv4-mapped IPv6-addresses the ``is_private`` value is determined by the
+ semantics of the underlying IPv4 addresses and the following condition holds
+ (see :attr:`IPv6Address.ipv4_mapped`)::
+ address.is_global == address.ipv4_mapped.is_global
+
+ ``is_global`` has value opposite to :attr:`is_private`, except for the ``100.64.0.0/10``
+ IPv4 range where they are both ``False``.
"""
return not self.is_private
@@ -2236,19 +2285,31 @@ class _IPv6Constants:
_multicast_network = IPv6Network('ff00::/8')
+ # Not globally reachable address blocks listed on
+ # https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
+ IPv6Network('64:ff9b:1::/48'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
- IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
- IPv6Network('2001:10::/28'),
+ # IANA says N/A, let's consider it not globally reachable to be safe
+ IPv6Network('2002::/16'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
+ _private_networks_exceptions = [
+ IPv6Network('2001:1::1/128'),
+ IPv6Network('2001:1::2/128'),
+ IPv6Network('2001:3::/32'),
+ IPv6Network('2001:4:112::/48'),
+ IPv6Network('2001:20::/28'),
+ IPv6Network('2001:30::/28'),
+ ]
+
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py
index 7de444af4a..716846b2ae 100644
--- a/Lib/test/test_ipaddress.py
+++ b/Lib/test/test_ipaddress.py
@@ -1665,6 +1665,10 @@ class IpaddrUnitTest(unittest.TestCase):
self.assertEqual(True, ipaddress.ip_address(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private)
+ self.assertFalse(ipaddress.ip_address('192.0.0.0').is_global)
+ self.assertTrue(ipaddress.ip_address('192.0.0.9').is_global)
+ self.assertTrue(ipaddress.ip_address('192.0.0.10').is_global)
+ self.assertFalse(ipaddress.ip_address('192.0.0.255').is_global)
self.assertEqual(True,
ipaddress.ip_address('169.254.100.200').is_link_local)
@@ -1680,6 +1684,40 @@ class IpaddrUnitTest(unittest.TestCase):
self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified)
+ def testPrivateNetworks(self):
+ self.assertEqual(True, ipaddress.ip_network("0.0.0.0/0").is_private)
+ self.assertEqual(False, ipaddress.ip_network("1.0.0.0/8").is_private)
+
+ self.assertEqual(True, ipaddress.ip_network("0.0.0.0/8").is_private)
+ self.assertEqual(True, ipaddress.ip_network("10.0.0.0/8").is_private)
+ self.assertEqual(True, ipaddress.ip_network("127.0.0.0/8").is_private)
+ self.assertEqual(True, ipaddress.ip_network("169.254.0.0/16").is_private)
+ self.assertEqual(True, ipaddress.ip_network("172.16.0.0/12").is_private)
+ self.assertEqual(True, ipaddress.ip_network("192.0.0.0/29").is_private)
+ self.assertEqual(False, ipaddress.ip_network("192.0.0.9/32").is_private)
+ self.assertEqual(True, ipaddress.ip_network("192.0.0.170/31").is_private)
+ self.assertEqual(True, ipaddress.ip_network("192.0.2.0/24").is_private)
+ self.assertEqual(True, ipaddress.ip_network("192.168.0.0/16").is_private)
+ self.assertEqual(True, ipaddress.ip_network("198.18.0.0/15").is_private)
+ self.assertEqual(True, ipaddress.ip_network("198.51.100.0/24").is_private)
+ self.assertEqual(True, ipaddress.ip_network("203.0.113.0/24").is_private)
+ self.assertEqual(True, ipaddress.ip_network("240.0.0.0/4").is_private)
+ self.assertEqual(True, ipaddress.ip_network("255.255.255.255/32").is_private)
+
+ self.assertEqual(False, ipaddress.ip_network("::/0").is_private)
+ self.assertEqual(False, ipaddress.ip_network("::ff/128").is_private)
+
+ self.assertEqual(True, ipaddress.ip_network("::1/128").is_private)
+ self.assertEqual(True, ipaddress.ip_network("::/128").is_private)
+ self.assertEqual(True, ipaddress.ip_network("::ffff:0:0/96").is_private)
+ self.assertEqual(True, ipaddress.ip_network("100::/64").is_private)
+ self.assertEqual(True, ipaddress.ip_network("2001:2::/48").is_private)
+ self.assertEqual(False, ipaddress.ip_network("2001:3::/48").is_private)
+ self.assertEqual(True, ipaddress.ip_network("2001:db8::/32").is_private)
+ self.assertEqual(True, ipaddress.ip_network("2001:10::/28").is_private)
+ self.assertEqual(True, ipaddress.ip_network("fc00::/7").is_private)
+ self.assertEqual(True, ipaddress.ip_network("fe80::/10").is_private)
+
def testReservedIpv6(self):
self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast)
@@ -1753,6 +1791,20 @@ class IpaddrUnitTest(unittest.TestCase):
self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified)
+ self.assertFalse(ipaddress.ip_address('64:ff9b:1::').is_global)
+ self.assertFalse(ipaddress.ip_address('2001::').is_global)
+ self.assertTrue(ipaddress.ip_address('2001:1::1').is_global)
+ self.assertTrue(ipaddress.ip_address('2001:1::2').is_global)
+ self.assertFalse(ipaddress.ip_address('2001:2::').is_global)
+ self.assertTrue(ipaddress.ip_address('2001:3::').is_global)
+ self.assertFalse(ipaddress.ip_address('2001:4::').is_global)
+ self.assertTrue(ipaddress.ip_address('2001:4:112::').is_global)
+ self.assertFalse(ipaddress.ip_address('2001:10::').is_global)
+ self.assertTrue(ipaddress.ip_address('2001:20::').is_global)
+ self.assertTrue(ipaddress.ip_address('2001:30::').is_global)
+ self.assertFalse(ipaddress.ip_address('2001:40::').is_global)
+ self.assertFalse(ipaddress.ip_address('2002::').is_global)
+
# some generic IETF reserved addresses
self.assertEqual(True, ipaddress.ip_address('100::').is_reserved)
self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved)
diff --git a/Misc/NEWS.d/next/Library/2024-03-14-01-38-44.gh-issue-113171.VFnObz.rst b/Misc/NEWS.d/next/Library/2024-03-14-01-38-44.gh-issue-113171.VFnObz.rst
new file mode 100644
index 0000000000..f9a72473be
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-03-14-01-38-44.gh-issue-113171.VFnObz.rst
@@ -0,0 +1,9 @@
+Fixed various false positives and false negatives in
+
+* :attr:`ipaddress.IPv4Address.is_private` (see these docs for details)
+* :attr:`ipaddress.IPv4Address.is_global`
+* :attr:`ipaddress.IPv6Address.is_private`
+* :attr:`ipaddress.IPv6Address.is_global`
+
+Also in the corresponding :class:`ipaddress.IPv4Network` and :class:`ipaddress.IPv6Network`
+attributes.
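
For reference, an illustrative sketch (not part of the patch) restating a few classifications that change with it, taken from the documentation and tests above:

import ipaddress

print(ipaddress.ip_address("192.0.0.1").is_private)    # True: 192.0.0.0/24 is now private
print(ipaddress.ip_address("192.0.0.9").is_global)     # True: per-address exception
print(ipaddress.ip_address("64:ff9b:1::").is_private)  # True: newly listed range
print(ipaddress.ip_address("2002::").is_global)        # False: 2002::/16 treated as not global
print(ipaddress.ip_address("::ffff:192.168.0.1").is_private)  # True: follows the mapped IPv4 address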

@@ -1,384 +0,0 @@
From 6c97acbb39693b94606b499f0c472fba2f5fd274 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Hrn=C4=8Diar?= <thrnciar@redhat.com>
Date: Tue, 20 Aug 2024 10:44:06 +0200
Subject: [PATCH] 00435: gh-121650: Encode newlines in headers, and verify
headers are sound (GH-122233)
Per RFC 2047:
> [...] these encoding schemes allow the
> encoding of arbitrary octet values, mail readers that implement this
> decoding should also ensure that display of the decoded data on the
> recipient's terminal will not cause unwanted side-effects
It seems that the "quoted-word" scheme is a valid way to include
a newline character in a header value, just like we already allow
undecodable bytes or control characters.
They do need to be properly quoted when serialized to text, though.
This should fail for custom fold() implementations that aren't careful
about newlines.
(cherry picked from commit 097633981879b3c9de9a1dd120d3aa585ecc2384)
This patch also contains modified commit cherry picked from
c5bba853d5e7836f6d4340e18721d3fb3a6ee0f7.
This commit was backported to simplify the backport of the other commit,
which fixes the CVE. The only modification is the removal of one test case
that covers multiple changes made in Python 3.7; it did not work properly
with Python 3.6, where only one of those changes was backported.
Co-authored-by: Petr Viktorin <encukou@gmail.com>
Co-authored-by: Bas Bloemsaat <bas@bloemsaat.org>
Co-authored-by: Serhiy Storchaka <storchaka@gmail.com>
Co-authored-by: bsiem <52461103+bsiem@users.noreply.github.com>
---
Doc/library/email.errors.rst | 6 ++
Doc/library/email.policy.rst | 18 ++++++
Lib/email/_header_value_parser.py | 9 +++
Lib/email/_policybase.py | 8 +++
Lib/email/errors.py | 4 ++
Lib/email/generator.py | 16 ++++-
Lib/test/test_email/test_generator.py | 62 +++++++++++++++++++
Lib/test/test_email/test_headerregistry.py | 16 +++++
Lib/test/test_email/test_policy.py | 26 ++++++++
.../2019-07-09-11-20-21.bpo-37482.auzvev.rst | 1 +
...-07-27-16-10-41.gh-issue-121650.nf6oc9.rst | 5 ++
11 files changed, 170 insertions(+), 1 deletion(-)
create mode 100644 Misc/NEWS.d/next/Library/2019-07-09-11-20-21.bpo-37482.auzvev.rst
create mode 100644 Misc/NEWS.d/next/Library/2024-07-27-16-10-41.gh-issue-121650.nf6oc9.rst
diff --git a/Doc/library/email.errors.rst b/Doc/library/email.errors.rst
index 511ad16..7e51f74 100644
--- a/Doc/library/email.errors.rst
+++ b/Doc/library/email.errors.rst
@@ -59,6 +59,12 @@ The following exception classes are defined in the :mod:`email.errors` module:
:class:`~email.mime.image.MIMEImage`).
+.. exception:: HeaderWriteError()
+
+ Raised when an error occurs when the :mod:`~email.generator` outputs
+ headers.
+
+
Here is the list of the defects that the :class:`~email.parser.FeedParser`
can find while parsing messages. Note that the defects are added to the message
where the problem was found, so for example, if a message nested inside a
diff --git a/Doc/library/email.policy.rst b/Doc/library/email.policy.rst
index 8e70762..8617b2e 100644
--- a/Doc/library/email.policy.rst
+++ b/Doc/library/email.policy.rst
@@ -229,6 +229,24 @@ added matters. To illustrate::
.. versionadded:: 3.6
+
+ .. attribute:: verify_generated_headers
+
+ If ``True`` (the default), the generator will raise
+ :exc:`~email.errors.HeaderWriteError` instead of writing a header
+ that is improperly folded or delimited, such that it would
+ be parsed as multiple headers or joined with adjacent data.
+ Such headers can be generated by custom header classes or bugs
+ in the ``email`` module.
+
+ As it's a security feature, this defaults to ``True`` even in the
+ :class:`~email.policy.Compat32` policy.
+ For backwards compatible, but unsafe, behavior, it must be set to
+ ``False`` explicitly.
+
+ .. versionadded:: 3.8.20
+
+
The following :class:`Policy` method is intended to be called by code using
the email library to create policy instances with custom settings:
diff --git a/Lib/email/_header_value_parser.py b/Lib/email/_header_value_parser.py
index 9815e4e..dab4cbb 100644
--- a/Lib/email/_header_value_parser.py
+++ b/Lib/email/_header_value_parser.py
@@ -92,6 +92,8 @@ TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
+NLSET = {'\n', '\r'}
+SPECIALSNL = SPECIALS | NLSET
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
@@ -2608,6 +2610,13 @@ def _refold_parse_tree(parse_tree, *, policy):
wrap_as_ew_blocked -= 1
continue
tstr = str(part)
+ if not want_encoding:
+ if part.token_type == 'ptext':
+ # Encode if tstr contains special characters.
+ want_encoding = not SPECIALSNL.isdisjoint(tstr)
+ else:
+ # Encode if tstr contains newlines.
+ want_encoding = not NLSET.isdisjoint(tstr)
try:
tstr.encode(encoding)
charset = encoding
diff --git a/Lib/email/_policybase.py b/Lib/email/_policybase.py
index c9cbadd..d1f4821 100644
--- a/Lib/email/_policybase.py
+++ b/Lib/email/_policybase.py
@@ -157,6 +157,13 @@ class Policy(_PolicyBase, metaclass=abc.ABCMeta):
message_factory -- the class to use to create new message objects.
If the value is None, the default is Message.
+ verify_generated_headers
+ -- if true, the generator verifies that each header
+ they are properly folded, so that a parser won't
+ treat it as multiple headers, start-of-body, or
+ part of another header.
+ This is a check against custom Header & fold()
+ implementations.
"""
raise_on_defect = False
@@ -165,6 +172,7 @@ class Policy(_PolicyBase, metaclass=abc.ABCMeta):
max_line_length = 78
mangle_from_ = False
message_factory = None
+ verify_generated_headers = True
def handle_defect(self, obj, defect):
"""Based on policy, either raise defect or call register_defect.
diff --git a/Lib/email/errors.py b/Lib/email/errors.py
index d28a680..1a0d5c6 100644
--- a/Lib/email/errors.py
+++ b/Lib/email/errors.py
@@ -29,6 +29,10 @@ class CharsetError(MessageError):
"""An illegal charset was given."""
+class HeaderWriteError(MessageError):
+ """Error while writing headers."""
+
+
# These are parsing defects which the parser was able to work around.
class MessageDefect(ValueError):
"""Base class for a message defect."""
diff --git a/Lib/email/generator.py b/Lib/email/generator.py
index ae670c2..6deb95b 100644
--- a/Lib/email/generator.py
+++ b/Lib/email/generator.py
@@ -14,12 +14,14 @@ import random
from copy import deepcopy
from io import StringIO, BytesIO
from email.utils import _has_surrogates
+from email.errors import HeaderWriteError
UNDERSCORE = '_'
NL = '\n' # XXX: no longer used by the code below.
NLCRE = re.compile(r'\r\n|\r|\n')
fcre = re.compile(r'^From ', re.MULTILINE)
+NEWLINE_WITHOUT_FWSP = re.compile(r'\r\n[^ \t]|\r[^ \n\t]|\n[^ \t]')
@@ -219,7 +221,19 @@ class Generator:
def _write_headers(self, msg):
for h, v in msg.raw_items():
- self.write(self.policy.fold(h, v))
+ folded = self.policy.fold(h, v)
+ if self.policy.verify_generated_headers:
+ linesep = self.policy.linesep
+ if not folded.endswith(self.policy.linesep):
+ raise HeaderWriteError(
+ f'folded header does not end with {linesep!r}: {folded!r}')
+ folded_no_linesep = folded
+ if folded.endswith(linesep):
+ folded_no_linesep = folded[:-len(linesep)]
+ if NEWLINE_WITHOUT_FWSP.search(folded_no_linesep):
+ raise HeaderWriteError(
+ f'folded header contains newline: {folded!r}')
+ self.write(folded)
# A blank line always separates headers from body
self.write(self._NL)
diff --git a/Lib/test/test_email/test_generator.py b/Lib/test/test_email/test_generator.py
index c1aeaef..cdf1075 100644
--- a/Lib/test/test_email/test_generator.py
+++ b/Lib/test/test_email/test_generator.py
@@ -5,6 +5,7 @@ from email import message_from_string, message_from_bytes
from email.message import EmailMessage
from email.generator import Generator, BytesGenerator
from email import policy
+import email.errors
from test.test_email import TestEmailBase, parameterize
@@ -215,6 +216,44 @@ class TestGeneratorBase:
g.flatten(msg)
self.assertEqual(s.getvalue(), self.typ(expected))
+ def test_keep_encoded_newlines(self):
+ msg = self.msgmaker(self.typ(textwrap.dedent("""\
+ To: nobody
+ Subject: Bad subject=?UTF-8?Q?=0A?=Bcc: injection@example.com
+
+ None
+ """)))
+ expected = textwrap.dedent("""\
+ To: nobody
+ Subject: Bad subject=?UTF-8?Q?=0A?=Bcc: injection@example.com
+
+ None
+ """)
+ s = self.ioclass()
+ g = self.genclass(s, policy=self.policy.clone(max_line_length=80))
+ g.flatten(msg)
+ self.assertEqual(s.getvalue(), self.typ(expected))
+
+ def test_keep_long_encoded_newlines(self):
+ msg = self.msgmaker(self.typ(textwrap.dedent("""\
+ To: nobody
+ Subject: Bad subject =?UTF-8?Q?=0A?=Bcc: injection@example.com
+
+ None
+ """)))
+ expected = textwrap.dedent("""\
+ To: nobody
+ Subject: Bad subject \n\
+ =?utf-8?q?=0A?=Bcc:
+ injection@example.com
+
+ None
+ """)
+ s = self.ioclass()
+ g = self.genclass(s, policy=self.policy.clone(max_line_length=30))
+ g.flatten(msg)
+ self.assertEqual(s.getvalue(), self.typ(expected))
+
class TestGenerator(TestGeneratorBase, TestEmailBase):
@@ -223,6 +262,29 @@ class TestGenerator(TestGeneratorBase, TestEmailBase):
ioclass = io.StringIO
typ = str
+ def test_verify_generated_headers(self):
+ """gh-121650: by default the generator prevents header injection"""
+ class LiteralHeader(str):
+ name = 'Header'
+ def fold(self, **kwargs):
+ return self
+
+ for text in (
+ 'Value\r\nBad Injection\r\n',
+ 'NoNewLine'
+ ):
+ with self.subTest(text=text):
+ message = message_from_string(
+ "Header: Value\r\n\r\nBody",
+ policy=self.policy,
+ )
+
+ del message['Header']
+ message['Header'] = LiteralHeader(text)
+
+ with self.assertRaises(email.errors.HeaderWriteError):
+ message.as_string()
+
class TestBytesGenerator(TestGeneratorBase, TestEmailBase):
diff --git a/Lib/test/test_email/test_headerregistry.py b/Lib/test/test_email/test_headerregistry.py
index 30ce0ba..d5004b3 100644
--- a/Lib/test/test_email/test_headerregistry.py
+++ b/Lib/test/test_email/test_headerregistry.py
@@ -1527,6 +1527,22 @@ class TestAddressAndGroup(TestEmailBase):
class TestFolding(TestHeaderBase):
+ def test_address_display_names(self):
+ """Test the folding and encoding of address headers."""
+ for name, result in (
+ ('Foo Bar, France', '"Foo Bar, France"'),
+ ('Foo Bar (France)', '"Foo Bar (France)"'),
+ ('Foo Bar, España', 'Foo =?utf-8?q?Bar=2C_Espa=C3=B1a?='),
+ ('Foo Bar (España)', 'Foo Bar =?utf-8?b?KEVzcGHDsWEp?='),
+ ('Foo, Bar España', '=?utf-8?q?Foo=2C_Bar_Espa=C3=B1a?='),
+ ('Foo, Bar [España]', '=?utf-8?q?Foo=2C_Bar_=5BEspa=C3=B1a=5D?='),
+ ('Foo Bär, France', 'Foo =?utf-8?q?B=C3=A4r=2C?= France'),
+ ('Foo Bär <France>', 'Foo =?utf-8?q?B=C3=A4r_=3CFrance=3E?='),
+ ):
+ h = self.make_header('To', Address(name, addr_spec='a@b.com'))
+ self.assertEqual(h.fold(policy=policy.default),
+ 'To: %s <a@b.com>\n' % result)
+
def test_short_unstructured(self):
h = self.make_header('subject', 'this is a test')
self.assertEqual(h.fold(policy=policy.default),
diff --git a/Lib/test/test_email/test_policy.py b/Lib/test/test_email/test_policy.py
index 8fecb8a..6793422 100644
--- a/Lib/test/test_email/test_policy.py
+++ b/Lib/test/test_email/test_policy.py
@@ -25,6 +25,7 @@ class PolicyAPITests(unittest.TestCase):
'raise_on_defect': False,
'mangle_from_': True,
'message_factory': None,
+ 'verify_generated_headers': True,
}
# These default values are the ones set on email.policy.default.
# If any of these defaults change, the docs must be updated.
@@ -237,6 +238,31 @@ class PolicyAPITests(unittest.TestCase):
email.policy.EmailPolicy.header_factory)
self.assertEqual(newpolicy.__dict__, {'raise_on_defect': True})
+ def test_verify_generated_headers(self):
+ """Turning protection off allows header injection"""
+ policy = email.policy.default.clone(verify_generated_headers=False)
+ for text in (
+ 'Header: Value\r\nBad: Injection\r\n',
+ 'Header: NoNewLine'
+ ):
+ with self.subTest(text=text):
+ message = email.message_from_string(
+ "Header: Value\r\n\r\nBody",
+ policy=policy,
+ )
+ class LiteralHeader(str):
+ name = 'Header'
+ def fold(self, **kwargs):
+ return self
+
+ del message['Header']
+ message['Header'] = LiteralHeader(text)
+
+ self.assertEqual(
+ message.as_string(),
+ f"{text}\nBody",
+ )
+
# XXX: Need subclassing tests.
# For adding subclassed objects, make sure the usual rules apply (subclass
# wins), but that the order still works (right overrides left).
diff --git a/Misc/NEWS.d/next/Library/2019-07-09-11-20-21.bpo-37482.auzvev.rst b/Misc/NEWS.d/next/Library/2019-07-09-11-20-21.bpo-37482.auzvev.rst
new file mode 100644
index 0000000..e09ff63
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-07-09-11-20-21.bpo-37482.auzvev.rst
@@ -0,0 +1 @@
+Fix serialization of display name in originator or destination address fields with both encoded words and special chars.
diff --git a/Misc/NEWS.d/next/Library/2024-07-27-16-10-41.gh-issue-121650.nf6oc9.rst b/Misc/NEWS.d/next/Library/2024-07-27-16-10-41.gh-issue-121650.nf6oc9.rst
new file mode 100644
index 0000000..83dd28d
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-07-27-16-10-41.gh-issue-121650.nf6oc9.rst
@@ -0,0 +1,5 @@
+:mod:`email` headers with embedded newlines are now quoted on output. The
+:mod:`~email.generator` will now refuse to serialize (write) headers that
+are unsafely folded or delimited; see
+:attr:`~email.policy.Policy.verify_generated_headers`. (Contributed by Bas
+Bloemsaat and Petr Viktorin in :gh:`121650`.)
--
2.45.2
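
Illustrative sketch (not part of the patch), following the pattern of the tests above: on a Python build with this change, a custom header class whose fold() returns a raw, unfolded value is refused at generation time while verify_generated_headers keeps its default of True.

import email
import email.errors
import email.policy

class LiteralHeader(str):
    # A header whose fold() bypasses the normal folding and encoding.
    name = "Header"
    def fold(self, **kwargs):
        return self

msg = email.message_from_string("Header: Value\r\n\r\nBody",
                                policy=email.policy.default)
del msg["Header"]
msg["Header"] = LiteralHeader("Value\r\nBcc: injected@example.com\r\n")

try:
    msg.as_string()  # the generator verifies the folded header before writing
except email.errors.HeaderWriteError as exc:
    print("refused:", exc)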

@@ -1,249 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Seth Michael Larson <seth@python.org>
Date: Wed, 4 Sep 2024 10:41:42 -0500
Subject: [PATCH] 00437: CVE-2024-6232 Remove backtracking when parsing tarfile
headers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* Remove backtracking when parsing tarfile headers
* Rewrite PAX header parsing to be stricter
* Optimize parsing of GNU extended sparse headers v0.0
(cherry picked from commit 34ddb64d088dd7ccc321f6103d23153256caa5d4)
Co-authored-by: Seth Michael Larson <seth@python.org>
Co-authored-by: Kirill Podoprigora <kirill.bast9@mail.ru>
Co-authored-by: Gregory P. Smith <greg@krypto.org>
Co-authored-by: Lumír Balhar <lbalhar@redhat.com>
---
Lib/tarfile.py | 104 +++++++++++-------
Lib/test/test_tarfile.py | 42 +++++++
...-07-02-13-39-20.gh-issue-121285.hrl-yI.rst | 2 +
3 files changed, 111 insertions(+), 37 deletions(-)
create mode 100644 Misc/NEWS.d/next/Security/2024-07-02-13-39-20.gh-issue-121285.hrl-yI.rst
diff --git a/Lib/tarfile.py b/Lib/tarfile.py
index c18590325a..ee1bf37bfd 100755
--- a/Lib/tarfile.py
+++ b/Lib/tarfile.py
@@ -846,6 +846,9 @@ _NAMED_FILTERS = {
# Sentinel for replace() defaults, meaning "don't change the attribute"
_KEEP = object()
+# Header length is digits followed by a space.
+_header_length_prefix_re = re.compile(br"([0-9]{1,20}) ")
+
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
@@ -1371,41 +1374,60 @@ class TarInfo(object):
else:
pax_headers = tarfile.pax_headers.copy()
- # Check if the pax header contains a hdrcharset field. This tells us
- # the encoding of the path, linkpath, uname and gname fields. Normally,
- # these fields are UTF-8 encoded but since POSIX.1-2008 tar
- # implementations are allowed to store them as raw binary strings if
- # the translation to UTF-8 fails.
- match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
- if match is not None:
- pax_headers["hdrcharset"] = match.group(1).decode("utf-8")
-
- # For the time being, we don't care about anything other than "BINARY".
- # The only other value that is currently allowed by the standard is
- # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
- hdrcharset = pax_headers.get("hdrcharset")
- if hdrcharset == "BINARY":
- encoding = tarfile.encoding
- else:
- encoding = "utf-8"
-
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
- # the newline. keyword and value are both UTF-8 encoded strings.
- regex = re.compile(br"(\d+) ([^=]+)=")
+ # the newline.
pos = 0
- while True:
- match = regex.match(buf, pos)
+ encoding = None
+ raw_headers = []
+ while len(buf) > pos and buf[pos] != 0x00:
+ match = _header_length_prefix_re.match(buf, pos)
if not match:
- break
+ raise InvalidHeaderError("invalid header")
+ try:
+ length = int(match.group(1))
+ except ValueError:
+ raise InvalidHeaderError("invalid header")
+ # Headers must be at least 5 bytes, shortest being '5 x=\n'.
+ # Value is allowed to be empty.
+ if length < 5:
+ raise InvalidHeaderError("invalid header")
+ if pos + length > len(buf):
+ raise InvalidHeaderError("invalid header")
- length, keyword = match.groups()
- length = int(length)
- if length == 0:
+ header_value_end_offset = match.start(1) + length - 1 # Last byte of the header
+ keyword_and_value = buf[match.end(1) + 1:header_value_end_offset]
+ raw_keyword, equals, raw_value = keyword_and_value.partition(b"=")
+
+ # Check the framing of the header. The last character must be '\n' (0x0A)
+ if not raw_keyword or equals != b"=" or buf[header_value_end_offset] != 0x0A:
raise InvalidHeaderError("invalid header")
- value = buf[match.end(2) + 1:match.start(1) + length - 1]
+ raw_headers.append((length, raw_keyword, raw_value))
+
+ # Check if the pax header contains a hdrcharset field. This tells us
+ # the encoding of the path, linkpath, uname and gname fields. Normally,
+ # these fields are UTF-8 encoded but since POSIX.1-2008 tar
+ # implementations are allowed to store them as raw binary strings if
+ # the translation to UTF-8 fails. For the time being, we don't care about
+ # anything other than "BINARY". The only other value that is currently
+ # allowed by the standard is "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
+ # Note that we only follow the initial 'hdrcharset' setting to preserve
+ # the initial behavior of the 'tarfile' module.
+ if raw_keyword == b"hdrcharset" and encoding is None:
+ if raw_value == b"BINARY":
+ encoding = tarfile.encoding
+ else: # This branch ensures only the first 'hdrcharset' header is used.
+ encoding = "utf-8"
+
+ pos += length
+
+ # If no explicit hdrcharset is set, we use UTF-8 as a default.
+ if encoding is None:
+ encoding = "utf-8"
+ # After parsing the raw headers we can decode them to text.
+ for length, raw_keyword, raw_value in raw_headers:
# Normally, we could just use "utf-8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
@@ -1413,17 +1435,16 @@ class TarInfo(object):
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
- keyword = self._decode_pax_field(keyword, "utf-8", "utf-8",
+ keyword = self._decode_pax_field(raw_keyword, "utf-8", "utf-8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
- value = self._decode_pax_field(value, encoding, tarfile.encoding,
+ value = self._decode_pax_field(raw_value, encoding, tarfile.encoding,
tarfile.errors)
else:
- value = self._decode_pax_field(value, "utf-8", "utf-8",
+ value = self._decode_pax_field(raw_value, "utf-8", "utf-8",
tarfile.errors)
pax_headers[keyword] = value
- pos += length
# Fetch the next header.
try:
@@ -1438,7 +1459,7 @@ class TarInfo(object):
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
- self._proc_gnusparse_00(next, pax_headers, buf)
+ self._proc_gnusparse_00(next, raw_headers)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
@@ -1460,15 +1481,24 @@ class TarInfo(object):
return next
- def _proc_gnusparse_00(self, next, pax_headers, buf):
+ def _proc_gnusparse_00(self, next, raw_headers):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
- for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
- offsets.append(int(match.group(1)))
numbytes = []
- for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
- numbytes.append(int(match.group(1)))
+ for _, keyword, value in raw_headers:
+ if keyword == b"GNU.sparse.offset":
+ try:
+ offsets.append(int(value.decode()))
+ except ValueError:
+ raise InvalidHeaderError("invalid header")
+
+ elif keyword == b"GNU.sparse.numbytes":
+ try:
+ numbytes.append(int(value.decode()))
+ except ValueError:
+ raise InvalidHeaderError("invalid header")
+
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index f261048615..04ef000e71 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -1046,6 +1046,48 @@ class PaxReadTest(LongnameTest, ReadTest, unittest.TestCase):
finally:
tar.close()
+ def test_pax_header_bad_formats(self):
+ # The fields from the pax header have priority over the
+ # TarInfo.
+ pax_header_replacements = (
+ b" foo=bar\n",
+ b"0 \n",
+ b"1 \n",
+ b"2 \n",
+ b"3 =\n",
+ b"4 =a\n",
+ b"1000000 foo=bar\n",
+ b"0 foo=bar\n",
+ b"-12 foo=bar\n",
+ b"000000000000000000000000036 foo=bar\n",
+ )
+ pax_headers = {"foo": "bar"}
+
+ for replacement in pax_header_replacements:
+ with self.subTest(header=replacement):
+ tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT,
+ encoding="iso8859-1")
+ try:
+ t = tarfile.TarInfo()
+ t.name = "pax" # non-ASCII
+ t.uid = 1
+ t.pax_headers = pax_headers
+ tar.addfile(t)
+ finally:
+ tar.close()
+
+ with open(tmpname, "rb") as f:
+ data = f.read()
+ self.assertIn(b"11 foo=bar\n", data)
+ data = data.replace(b"11 foo=bar\n", replacement)
+
+ with open(tmpname, "wb") as f:
+ f.truncate()
+ f.write(data)
+
+ with self.assertRaisesRegex(tarfile.ReadError, r"file could not be opened successfully"):
+ tarfile.open(tmpname, encoding="iso8859-1")
+
class WriteTestBase(TarTest):
# Put all write tests in here that are supposed to be tested
diff --git a/Misc/NEWS.d/next/Security/2024-07-02-13-39-20.gh-issue-121285.hrl-yI.rst b/Misc/NEWS.d/next/Security/2024-07-02-13-39-20.gh-issue-121285.hrl-yI.rst
new file mode 100644
index 0000000000..81f918bfe2
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2024-07-02-13-39-20.gh-issue-121285.hrl-yI.rst
@@ -0,0 +1,2 @@
+Remove backtracking from tarfile header parsing for ``hdrcharset``, PAX, and
+GNU sparse headers.
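
For reference, a minimal standalone sketch of the record framing the rewritten parser enforces; the length-prefix regex and the framing checks are taken from the hunk above, while the helper name parse_pax_records and the demo record are illustrative, not part of the patch:

    import re

    # Same anchored length prefix as the patch uses: 1-20 digits followed by a space.
    _header_length_prefix_re = re.compile(br"([0-9]{1,20}) ")

    def parse_pax_records(buf):
        """Parse newline-terminated 'length keyword=value' records without backtracking."""
        pos = 0
        records = []
        while pos < len(buf) and buf[pos] != 0x00:
            match = _header_length_prefix_re.match(buf, pos)
            if not match:
                raise ValueError("invalid header")
            length = int(match.group(1))
            # Shortest legal record is b'5 x=\n'; the length may not overrun the buffer.
            if length < 5 or pos + length > len(buf):
                raise ValueError("invalid header")
            end = match.start(1) + length - 1      # index of the record's trailing newline
            keyword, equals, value = buf[match.end(1) + 1:end].partition(b"=")
            if not keyword or equals != b"=" or buf[end] != 0x0A:
                raise ValueError("invalid header")
            records.append((keyword, value))
            pos += length
        return records

    print(parse_pax_records(b"11 foo=bar\n"))      # [(b'foo', b'bar')]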

@ -1,281 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Victor Stinner <vstinner@python.org>
Date: Fri, 1 Nov 2024 14:11:47 +0100
Subject: [PATCH] 00443: gh-124651: Quote template strings in `venv` activation
scripts
(cherry picked from 3.9)
---
Lib/test/test_venv.py | 82 +++++++++++++++++++
Lib/venv/__init__.py | 42 ++++++++--
Lib/venv/scripts/common/activate | 8 +-
Lib/venv/scripts/posix/activate.csh | 8 +-
Lib/venv/scripts/posix/activate.fish | 8 +-
...-09-28-02-03-04.gh-issue-124651.bLBGtH.rst | 1 +
6 files changed, 132 insertions(+), 17 deletions(-)
create mode 100644 Misc/NEWS.d/next/Library/2024-09-28-02-03-04.gh-issue-124651.bLBGtH.rst
diff --git a/Lib/test/test_venv.py b/Lib/test/test_venv.py
index 842470fef0..67fdcd86bb 100644
--- a/Lib/test/test_venv.py
+++ b/Lib/test/test_venv.py
@@ -13,6 +13,8 @@ import struct
import subprocess
import sys
import tempfile
+import shlex
+import shutil
from test.support import (captured_stdout, captured_stderr, requires_zlib,
can_symlink, EnvironmentVarGuard, rmtree)
import unittest
@@ -80,6 +82,10 @@ class BaseTest(unittest.TestCase):
result = f.read()
return result
+ def assertEndsWith(self, string, tail):
+ if not string.endswith(tail):
+ self.fail(f"String {string!r} does not end with {tail!r}")
+
class BasicTest(BaseTest):
"""Test venv module functionality."""
@@ -293,6 +299,82 @@ class BasicTest(BaseTest):
'import sys; print(sys.executable)'])
self.assertEqual(out.strip(), envpy.encode())
+ # gh-124651: test quoted strings
+ @unittest.skipIf(os.name == 'nt', 'contains invalid characters on Windows')
+ def test_special_chars_bash(self):
+ """
+ Test that the template strings are quoted properly (bash)
+ """
+ rmtree(self.env_dir)
+ bash = shutil.which('bash')
+ if bash is None:
+ self.skipTest('bash required for this test')
+ env_name = '"\';&&$e|\'"'
+ env_dir = os.path.join(os.path.realpath(self.env_dir), env_name)
+ builder = venv.EnvBuilder(clear=True)
+ builder.create(env_dir)
+ activate = os.path.join(env_dir, self.bindir, 'activate')
+ test_script = os.path.join(self.env_dir, 'test_special_chars.sh')
+ with open(test_script, "w") as f:
+ f.write(f'source {shlex.quote(activate)}\n'
+ 'python -c \'import sys; print(sys.executable)\'\n'
+ 'python -c \'import os; print(os.environ["VIRTUAL_ENV"])\'\n'
+ 'deactivate\n')
+ out, err = check_output([bash, test_script])
+ lines = out.splitlines()
+ self.assertTrue(env_name.encode() in lines[0])
+ self.assertEndsWith(lines[1], env_name.encode())
+
+ # gh-124651: test quoted strings
+ @unittest.skipIf(os.name == 'nt', 'contains invalid characters on Windows')
+ def test_special_chars_csh(self):
+ """
+ Test that the template strings are quoted properly (csh)
+ """
+ rmtree(self.env_dir)
+ csh = shutil.which('tcsh') or shutil.which('csh')
+ if csh is None:
+ self.skipTest('csh required for this test')
+ env_name = '"\';&&$e|\'"'
+ env_dir = os.path.join(os.path.realpath(self.env_dir), env_name)
+ builder = venv.EnvBuilder(clear=True)
+ builder.create(env_dir)
+ activate = os.path.join(env_dir, self.bindir, 'activate.csh')
+ test_script = os.path.join(self.env_dir, 'test_special_chars.csh')
+ with open(test_script, "w") as f:
+ f.write(f'source {shlex.quote(activate)}\n'
+ 'python -c \'import sys; print(sys.executable)\'\n'
+ 'python -c \'import os; print(os.environ["VIRTUAL_ENV"])\'\n'
+ 'deactivate\n')
+ out, err = check_output([csh, test_script])
+ lines = out.splitlines()
+ self.assertTrue(env_name.encode() in lines[0])
+ self.assertEndsWith(lines[1], env_name.encode())
+
+ # gh-124651: test quoted strings on Windows
+ @unittest.skipUnless(os.name == 'nt', 'only relevant on Windows')
+ def test_special_chars_windows(self):
+ """
+ Test that the template strings are quoted properly on Windows
+ """
+ rmtree(self.env_dir)
+ env_name = "'&&^$e"
+ env_dir = os.path.join(os.path.realpath(self.env_dir), env_name)
+ builder = venv.EnvBuilder(clear=True)
+ builder.create(env_dir)
+ activate = os.path.join(env_dir, self.bindir, 'activate.bat')
+ test_batch = os.path.join(self.env_dir, 'test_special_chars.bat')
+ with open(test_batch, "w") as f:
+ f.write('@echo off\n'
+ f'"{activate}" & '
+ f'{self.exe} -c "import sys; print(sys.executable)" & '
+ f'{self.exe} -c "import os; print(os.environ[\'VIRTUAL_ENV\'])" & '
+ 'deactivate')
+ out, err = check_output([test_batch])
+ lines = out.splitlines()
+ self.assertTrue(env_name.encode() in lines[0])
+ self.assertEndsWith(lines[1], env_name.encode())
+
@unittest.skipUnless(os.name == 'nt', 'only relevant on Windows')
def test_unicode_in_batch_file(self):
"""
diff --git a/Lib/venv/__init__.py b/Lib/venv/__init__.py
index 716129d139..0c44dfd07d 100644
--- a/Lib/venv/__init__.py
+++ b/Lib/venv/__init__.py
@@ -10,6 +10,7 @@ import shutil
import subprocess
import sys
import types
+import shlex
logger = logging.getLogger(__name__)
@@ -280,11 +281,41 @@ class EnvBuilder:
:param context: The information for the environment creation request
being processed.
"""
- text = text.replace('__VENV_DIR__', context.env_dir)
- text = text.replace('__VENV_NAME__', context.env_name)
- text = text.replace('__VENV_PROMPT__', context.prompt)
- text = text.replace('__VENV_BIN_NAME__', context.bin_name)
- text = text.replace('__VENV_PYTHON__', context.env_exe)
+ replacements = {
+ '__VENV_DIR__': context.env_dir,
+ '__VENV_NAME__': context.env_name,
+ '__VENV_PROMPT__': context.prompt,
+ '__VENV_BIN_NAME__': context.bin_name,
+ '__VENV_PYTHON__': context.env_exe,
+ }
+
+ def quote_ps1(s):
+ """
+ This should satisfy PowerShell quoting rules [1], unless the quoted
+ string is passed directly to Windows native commands [2].
+ [1]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_quoting_rules
+ [2]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_parsing#passing-arguments-that-contain-quote-characters
+ """
+ s = s.replace("'", "''")
+ return f"'{s}'"
+
+ def quote_bat(s):
+ return s
+
+ # gh-124651: need to quote the template strings properly
+ quote = shlex.quote
+ script_path = context.script_path
+ if script_path.endswith('.ps1'):
+ quote = quote_ps1
+ elif script_path.endswith('.bat'):
+ quote = quote_bat
+ else:
+ # fallbacks to POSIX shell compliant quote
+ quote = shlex.quote
+
+ replacements = {key: quote(s) for key, s in replacements.items()}
+ for key, quoted in replacements.items():
+ text = text.replace(key, quoted)
return text
def install_scripts(self, context, path):
@@ -321,6 +352,7 @@ class EnvBuilder:
with open(srcfile, 'rb') as f:
data = f.read()
if not srcfile.endswith('.exe'):
+ context.script_path = srcfile
try:
data = data.decode('utf-8')
data = self.replace_variables(data, context)
diff --git a/Lib/venv/scripts/common/activate b/Lib/venv/scripts/common/activate
index fff0765af5..c2e2f968fa 100644
--- a/Lib/venv/scripts/common/activate
+++ b/Lib/venv/scripts/common/activate
@@ -37,11 +37,11 @@ deactivate () {
# unset irrelevant variables
deactivate nondestructive
-VIRTUAL_ENV="__VENV_DIR__"
+VIRTUAL_ENV=__VENV_DIR__
export VIRTUAL_ENV
_OLD_VIRTUAL_PATH="$PATH"
-PATH="$VIRTUAL_ENV/__VENV_BIN_NAME__:$PATH"
+PATH="$VIRTUAL_ENV/"__VENV_BIN_NAME__":$PATH"
export PATH
# unset PYTHONHOME if set
@@ -54,8 +54,8 @@ fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
- if [ "x__VENV_PROMPT__" != x ] ; then
- PS1="__VENV_PROMPT__${PS1:-}"
+ if [ "x"__VENV_PROMPT__ != x ] ; then
+ PS1=__VENV_PROMPT__"${PS1:-}"
else
if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
# special case for Aspen magic directories
diff --git a/Lib/venv/scripts/posix/activate.csh b/Lib/venv/scripts/posix/activate.csh
index b0c7028a92..0e90d54008 100644
--- a/Lib/venv/scripts/posix/activate.csh
+++ b/Lib/venv/scripts/posix/activate.csh
@@ -8,17 +8,17 @@ alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PA
# Unset irrelevant variables.
deactivate nondestructive
-setenv VIRTUAL_ENV "__VENV_DIR__"
+setenv VIRTUAL_ENV __VENV_DIR__
set _OLD_VIRTUAL_PATH="$PATH"
-setenv PATH "$VIRTUAL_ENV/__VENV_BIN_NAME__:$PATH"
+setenv PATH "$VIRTUAL_ENV/"__VENV_BIN_NAME__":$PATH"
set _OLD_VIRTUAL_PROMPT="$prompt"
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
- if ("__VENV_NAME__" != "") then
- set env_name = "__VENV_NAME__"
+ if (__VENV_NAME__ != "") then
+ set env_name = __VENV_NAME__
else
if (`basename "VIRTUAL_ENV"` == "__") then
# special case for Aspen magic directories
diff --git a/Lib/venv/scripts/posix/activate.fish b/Lib/venv/scripts/posix/activate.fish
index 4d4f0bd7a4..0407f9c7be 100644
--- a/Lib/venv/scripts/posix/activate.fish
+++ b/Lib/venv/scripts/posix/activate.fish
@@ -29,10 +29,10 @@ end
# unset irrelevant variables
deactivate nondestructive
-set -gx VIRTUAL_ENV "__VENV_DIR__"
+set -gx VIRTUAL_ENV __VENV_DIR__
set -gx _OLD_VIRTUAL_PATH $PATH
-set -gx PATH "$VIRTUAL_ENV/__VENV_BIN_NAME__" $PATH
+set -gx PATH "$VIRTUAL_ENV/"__VENV_BIN_NAME__ $PATH
# unset PYTHONHOME if set
if set -q PYTHONHOME
@@ -52,8 +52,8 @@ if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
set -l old_status $status
# Prompt override?
- if test -n "__VENV_PROMPT__"
- printf "%s%s" "__VENV_PROMPT__" (set_color normal)
+ if test -n __VENV_PROMPT__
+ printf "%s%s" __VENV_PROMPT__ (set_color normal)
else
# ...Otherwise, prepend env
set -l _checkbase (basename "$VIRTUAL_ENV")
diff --git a/Misc/NEWS.d/next/Library/2024-09-28-02-03-04.gh-issue-124651.bLBGtH.rst b/Misc/NEWS.d/next/Library/2024-09-28-02-03-04.gh-issue-124651.bLBGtH.rst
new file mode 100644
index 0000000000..17fc917139
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-09-28-02-03-04.gh-issue-124651.bLBGtH.rst
@@ -0,0 +1 @@
+Properly quote template strings in :mod:`venv` activation scripts.
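
For reference, a minimal sketch of the per-script quoting that the patched replace_variables() applies; quote_ps1 and the shlex.quote fallback mirror the hunk above, while render_activate and the sample template are illustrative simplifications (the .bat path, which the patch leaves unquoted, is omitted):

    import shlex

    def quote_ps1(s):
        # PowerShell single-quoted literals escape ' by doubling it.
        return "'" + s.replace("'", "''") + "'"

    def render_activate(template, env_dir, script_path):
        # Pick a quoting function per script type, as replace_variables() now does.
        quote = quote_ps1 if script_path.endswith('.ps1') else shlex.quote
        replacements = {'__VENV_DIR__': env_dir}
        for key, value in replacements.items():
            template = template.replace(key, quote(value))
        return template

    # A directory name with quotes and shell metacharacters stays a literal string:
    print(render_activate('VIRTUAL_ENV=__VENV_DIR__',
                          "/home/user/env with 'quotes' && $HOME",
                          'activate'))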

@ -1,110 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Miss Islington (bot)"
<31488909+miss-islington@users.noreply.github.com>
Date: Tue, 9 May 2023 23:35:24 -0700
Subject: [PATCH] 00444: Security fix for CVE-2024-11168
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
gh-103848: Adds checks to ensure that bracketed hosts found by urlsplit are of IPv6 or IPvFuture format (GH-103849)
Tests are adjusted because Python < 3.9 doesn't support scoped IPv6 addresses.
(cherry picked from commit 29f348e232e82938ba2165843c448c2b291504c5)
Co-authored-by: JohnJamesUtley <81572567+JohnJamesUtley@users.noreply.github.com>
Co-authored-by: Gregory P. Smith <greg@krypto.org>
Co-authored-by: Lumír Balhar <lbalhar@redhat.com>
---
Lib/test/test_urlparse.py | 26 +++++++++++++++++++
Lib/urllib/parse.py | 15 +++++++++++
...-04-26-09-54-25.gh-issue-103848.aDSnpR.rst | 2 ++
3 files changed, 43 insertions(+)
create mode 100644 Misc/NEWS.d/next/Library/2023-04-26-09-54-25.gh-issue-103848.aDSnpR.rst
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 7fd61ffea9..090d2f17bf 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -1076,6 +1076,32 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(p2.scheme, 'tel')
self.assertEqual(p2.path, '+31641044153')
+ def test_invalid_bracketed_hosts(self):
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[192.0.2.146]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[important.com:8000]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[v123r.IP]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[v12ae]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[v.IP]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[v123.]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[v]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[0439:23af::2309::fae7:1234]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@[0439:23af:2309::fae7:1234:2342:438e:192.0.2.146]/Path?Query')
+ self.assertRaises(ValueError, urllib.parse.urlsplit, 'Scheme://user@]v6a.ip[/Path')
+
+ def test_splitting_bracketed_hosts(self):
+ p1 = urllib.parse.urlsplit('scheme://user@[v6a.ip]/path?query')
+ self.assertEqual(p1.hostname, 'v6a.ip')
+ self.assertEqual(p1.username, 'user')
+ self.assertEqual(p1.path, '/path')
+ p2 = urllib.parse.urlsplit('scheme://user@[0439:23af:2309::fae7]/path?query')
+ self.assertEqual(p2.hostname, '0439:23af:2309::fae7')
+ self.assertEqual(p2.username, 'user')
+ self.assertEqual(p2.path, '/path')
+ p3 = urllib.parse.urlsplit('scheme://user@[0439:23af:2309::fae7:1234:192.0.2.146]/path?query')
+ self.assertEqual(p3.hostname, '0439:23af:2309::fae7:1234:192.0.2.146')
+ self.assertEqual(p3.username, 'user')
+ self.assertEqual(p3.path, '/path')
+
def test_telurl_params(self):
p1 = urllib.parse.urlparse('tel:123-4;phone-context=+1-650-516')
self.assertEqual(p1.scheme, 'tel')
diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
index 717e990997..bf186b7984 100644
--- a/Lib/urllib/parse.py
+++ b/Lib/urllib/parse.py
@@ -34,6 +34,7 @@ It serves as a useful guide when making changes.
import os
import sys
import collections
+import ipaddress
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "urlencode", "parse_qs",
@@ -425,6 +426,17 @@ def _remove_unsafe_bytes_from_url(url):
url = url.replace(b, "")
return url
+# Valid bracketed hosts are defined in
+# https://www.rfc-editor.org/rfc/rfc3986#page-49 and https://url.spec.whatwg.org/
+def _check_bracketed_host(hostname):
+ if hostname.startswith('v'):
+ if not re.match(r"\Av[a-fA-F0-9]+\..+\Z", hostname):
+ raise ValueError(f"IPvFuture address is invalid")
+ else:
+ ip = ipaddress.ip_address(hostname) # Throws Value Error if not IPv6 or IPv4
+ if isinstance(ip, ipaddress.IPv4Address):
+ raise ValueError(f"An IPv4 address cannot be in brackets")
+
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
@@ -480,6 +492,9 @@ def urlsplit(url, scheme='', allow_fragments=True):
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
+ if '[' in netloc and ']' in netloc:
+ bracketed_host = netloc.partition('[')[2].partition(']')[0]
+ _check_bracketed_host(bracketed_host)
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
diff --git a/Misc/NEWS.d/next/Library/2023-04-26-09-54-25.gh-issue-103848.aDSnpR.rst b/Misc/NEWS.d/next/Library/2023-04-26-09-54-25.gh-issue-103848.aDSnpR.rst
new file mode 100644
index 0000000000..81e5904aa6
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-04-26-09-54-25.gh-issue-103848.aDSnpR.rst
@@ -0,0 +1,2 @@
+Add checks to ensure that ``[`` bracketed ``]`` hosts found by
+:func:`urllib.parse.urlsplit` are of IPv6 or IPvFuture format.
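
For reference, a minimal sketch of the bracketed-host validation the patch adds; the IPvFuture regex and the ipaddress checks mirror _check_bracketed_host() above, while the standalone helper name and the demo calls are illustrative:

    import ipaddress
    import re

    def check_bracketed_host(hostname):
        # IPvFuture is 'v' + hex digits + '.' + at least one more character;
        # anything else must parse as an IP address and must not be IPv4.
        if hostname.startswith('v'):
            if not re.match(r"\Av[a-fA-F0-9]+\..+\Z", hostname):
                raise ValueError("IPvFuture address is invalid")
        else:
            ip = ipaddress.ip_address(hostname)   # raises ValueError otherwise
            if isinstance(ip, ipaddress.IPv4Address):
                raise ValueError("An IPv4 address cannot be in brackets")

    check_bracketed_host("0439:23af:2309::fae7")   # accepted (IPv6)
    check_bracketed_host("v6a.ip")                 # accepted (IPvFuture)
    try:
        check_bracketed_host("192.0.2.146")        # rejected: IPv4 in brackets
    except ValueError as exc:
        print(exc)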

@ -14,7 +14,7 @@ URL: https://www.python.org/
# WARNING When rebasing to a new Python version,
# remember to update the python3-docs package as well
Version: %{pybasever}.8
Release: 69%{?dist}
Release: 59%{?dist}
License: Python
@ -158,12 +158,6 @@ License: Python
%global py_INSTSONAME_optimized libpython%{LDVERSION_optimized}.so.%{py_SOVERSION}
%global py_INSTSONAME_debug libpython%{LDVERSION_debug}.so.%{py_SOVERSION}
# The -O flag for the compiler, optimized builds
# https://fedoraproject.org/wiki/Changes/Python_built_with_gcc_O3
%global optflags_optimized -O3
# The -O flag for the compiler, debug builds
%global optflags_debug -Og
# Disable automatic bytecompilation. The python3 binary is not yet
# available in /usr/bin when Python is built. Also, the bytecompilation fails
# on files that test invalid syntax.
@ -843,81 +837,6 @@ Patch414: 00414-skip_test_zlib_s390x.patch
# config file or environment variable.
Patch415: 00415-cve-2023-27043-gh-102988-reject-malformed-addresses-in-email-parseaddr-111116.patch
# 00422 #
# gh-115133: Fix tests for XMLPullParser with Expat 2.6.0
#
# Feeding the parser by too small chunks defers parsing to prevent
# CVE-2023-52425. Future versions of Expat may be more reactive.
#
# Patch rebased because the CVE fix is backported to older expat in RHEL.
Patch422: 00422-gh-115133-fix-tests-for-xmlpullparser-with-expat-2-6-0.patch
# 426 #
# CVE-2023-6597
#
# Path traversal on tempfile.TemporaryDirectory
#
# Upstream: https://github.com/python/cpython/issues/91133
# Tracking bug: https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2023-6597
#
# To backport the fix cleanly the patch contains also this rebased commit:
# Fix permission errors in TemporaryDirectory cleanup
# https://github.com/python/cpython/commit/e9b51c0ad81da1da11ae65840ac8b50a8521373c
Patch426: 00426-CVE-2023-6597.patch
# 427 #
# CVE-2024-0450
#
# The zipfile module is vulnerable to zip-bombs leading to denial of service.
#
# Upstream: https://github.com/python/cpython/issues/109858
# Tracking bug: https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2024-0450
#
# To backport the fix cleanly also this change is backported:
# Add seek and tell functionality to ZipExtFile
# https://github.com/python/cpython/commit/066df4fd454d6ff9be66e80b2a65995b10af174f
#
# Patch rebased from 3.8.
Patch427: 00427-CVE-2024-0450.patch
# 00431 #
# CVE-2024-4032: incorrect IPv4 and IPv6 private ranges
#
# Upstream issue: https://github.com/python/cpython/issues/113171
#
# Backported from 3.8.
Patch431: 00431-cve-2024-4032.patch
# 00435 #
# CVE-2024-6923: encode newlines in email headers
#
# Upstream issue: https://github.com/python/cpython/issues/121650
#
# Backported from 3.8.
Patch435: 00435-cve-2024-6923.patch
# 00437 #
# CVE-2024-6232: tarfile: ReDos via excessive backtracking while parsing header values
#
# Upstream issue: https://github.com/python/cpython/issues/121285
#
# Cherry-picked from 3.8.
Patch437: 00437-cve-2024-6232.patch
# 00443 # 49e939f29e3551ec4e7bdb2cc8b8745e3d1fca35
# gh-124651: Quote template strings in `venv` activation scripts
#
# (cherry picked from 3.9)
Patch443: 00443-gh-124651-quote-template-strings-in-venv-activation-scripts.patch
# 00444 # fed0071c8c86599091f93967a5fa2cce42ceb840
# Security fix for CVE-2024-11168
#
# gh-103848: Adds checks to ensure that bracketed hosts found by urlsplit are of IPv6 or IPvFuture format (GH-103849)
#
# Tests are adjusted because Python < 3.9 doesn't support scoped IPv6 addresses.
Patch444: 00444-security-fix-for-cve-2024-11168.patch
# (New patches go here ^^^)
#
# When adding new patches to "python" and "python3" in Fedora, EL, etc.,
@ -941,14 +860,6 @@ Conflicts: python3 < 3.6.6-13
# depend on python(abi). Provide that here.
Provides: python(abi) = %{pybasever}
# With https://fedoraproject.org/wiki/Changes/DNFConditionalFilelists
# it is no longer possible to Require paths from %%_libexecdir
# However, python3-dnf requires this path
# and that breaks the mock bootstrap chroot for rhel-8 on Fedora 40+.
# Fixes https://issues.redhat.com/browse/RHEL-48605
# Fixes https://pagure.io/releng/issue/12199
Provides: %{_libexecdir}/platform-python
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%if %{with rpmwheels}
@ -1279,14 +1190,6 @@ git apply %{PATCH351}
%patch413 -p1
%patch414 -p1
%patch415 -p1
%patch422 -p1
%patch426 -p1
%patch427 -p1
%patch431 -p1
%patch435 -p1
%patch437 -p1
%patch443 -p1
%patch444 -p1
# Remove files that should be generated by the build
# (This is after patching, so that we can use patches directly from upstream)
@ -1393,12 +1296,12 @@ BuildPython() {
%if %{with debug_build}
BuildPython debug \
"--without-ensurepip --with-pydebug" \
"%{optflags_debug}"
"-Og"
%endif # with debug_build
BuildPython optimized \
"--without-ensurepip %{optimizations_flag}" \
"%{optflags_optimized}"
""
# ======================================================
# Installing the built code:
@ -1515,14 +1418,14 @@ EOF
%if %{with debug_build}
InstallPython debug \
%{py_INSTSONAME_debug} \
"%{optflags_debug}" \
-O0 \
%{LDVERSION_debug}
%endif # with debug_build
# Now the optimized build:
InstallPython optimized \
%{py_INSTSONAME_optimized} \
"%{optflags_optimized}" \
"" \
%{LDVERSION_optimized}
# Install directories for additional packages
@ -2218,46 +2121,6 @@ fi
# ======================================================
%changelog
* Thu Nov 14 2024 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-69
- Security fix for CVE-2024-11168
Resolves: RHEL-67252
* Tue Nov 05 2024 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-68
- Security fix for CVE-2024-9287
Resolves: RHEL-64878
* Thu Sep 05 2024 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-67
- Security fix for CVE-2024-6232
Resolves: RHEL-57399
* Mon Aug 19 2024 Tomáš Hrnčiar <thrnciar@redhat.com> - 3.6.8-66
- Security fix for CVE-2024-6923
Resolves: RHEL-53065
* Wed Jul 24 2024 Charalampos Stratakis <cstratak@redhat.com> - 3.6.8-65
- Build Python with -O3
- https://fedoraproject.org/wiki/Changes/Python_built_with_gcc_O3
* Thu Jul 18 2024 Miro Hrončok <mhroncok@redhat.com> - 3.6.8-64
- Add explicit RPM Provides for /usr/libexec/platform-python
Resolves: RHEL-48605
* Thu Jul 04 2024 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-63
- Security fix for CVE-2024-4032
Resolves: RHEL-44060
* Wed Apr 24 2024 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-62
- Security fix for CVE-2024-0450
Resolves: RHEL-33683
* Wed Apr 24 2024 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-61
- Security fix for CVE-2023-6597
Resolves: RHEL-33671
* Wed Apr 24 2024 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-60
- Fix build with expat with fixed CVE-2023-52425
Related: RHEL-33671
* Thu Jan 04 2024 Lumír Balhar <lbalhar@redhat.com> - 3.6.8-59
- Security fix for CVE-2023-27043
Resolves: RHEL-20610
