diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py
index a4cc205c307..84f39a2fee8 100644
--- a/Lib/idlelib/IOBinding.py
+++ b/Lib/idlelib/IOBinding.py
@@ -62,7 +62,7 @@ locale_encoding = locale_encoding.lower()
 encoding = locale_encoding  ### KBK 07Sep07  This is used all over IDLE, check!
                             ### 'encoding' is used below in encode(), check!
 
-coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
+coding_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
 blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
 
 def coding_spec(data):
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 1ff1c61ee22..d14db60f7da 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -236,7 +236,7 @@ class Untokenizer:
                 startline = False
             toks_append(tokval)
 
-cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
+cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
 blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
 
 def _get_normal_name(orig_enc):
diff --git a/Lib/test/test_importlib/source/test_source_encoding.py b/Lib/test/test_importlib/source/test_source_encoding.py
index b604afb5ec8..1e0771b19de 100644
--- a/Lib/test/test_importlib/source/test_source_encoding.py
+++ b/Lib/test/test_importlib/source/test_source_encoding.py
@@ -14,7 +14,7 @@ import unittest
 import warnings
 
 
-CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
+CODING_RE = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 
 class EncodingTest:
diff --git a/Lib/test/test_source_encoding.py b/Lib/test/test_source_encoding.py
index 7979c820109..38734009c00 100644
--- a/Lib/test/test_source_encoding.py
+++ b/Lib/test/test_source_encoding.py
@@ -178,7 +178,7 @@ class AbstractSourceEncodingTest:
     def test_double_coding_same_line(self):
         src = (b'#coding:iso8859-15 coding:latin1\n'
                b'print(ascii("\xc3\xa4"))\n')
-        self.check_script_output(src, br"'\xc3\xa4'")
+        self.check_script_output(src, br"'\xc3\u20ac'")
 
     def test_first_non_utf8_coding_line(self):
         src = (b'#coding:iso-8859-15 \xa4\n'
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 7a003580a4a..ec79ec886da 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -34,7 +34,7 @@ import re
 import sys
 from token import *
 
-cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
+cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
 blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
 
 import token
diff --git a/Misc/NEWS b/Misc/NEWS
index ae0a1001686..6f5c7ab0bf7 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -10,6 +10,9 @@ Release date: tba
 Core and Builtins
 -----------------
 
+- Issue #26581: If a coding cookie is specified multiple times on a line in a
+  Python source code file, only the first one is taken into account.
+
 - Issue #26563: Debug hooks on Python memory allocators now raise a fatal
   error if functions of the :c:func:`PyMem_Malloc` family are called without
   holding the GIL.
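Note (not part of the patch): switching the cookie regex from the greedy '.*' to the
non-greedy '.*?' makes it report the first encoding declaration on a line instead of
the last one, which is what the updated test_double_coding_same_line now expects.
A minimal sketch of the difference, using the two patterns from the hunks above:

    import re

    # Old (greedy) and new (non-greedy) cookie patterns.
    OLD_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
    NEW_RE = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)

    line = '#coding:iso8859-15 coding:latin1'
    print(OLD_RE.match(line).group(1))  # latin1     -- the last cookie won
    print(NEW_RE.match(line).group(1))  # iso8859-15 -- the first cookie wins
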
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index be7cf497c44..c653121706b 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -275,6 +275,7 @@ get_coding_spec(const char *s, char **spec, Py_ssize_t size, struct tok_state *t
                     return 0;
                 }
                 *spec = r;
+                break;
             }
         }
     }
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index 5f3795e6575..6c16b1ce151 100755
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -32,7 +32,7 @@ except ImportError:
           "no sophisticated Python source file search will be done.",
           file=sys.stderr)
 
-decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
+decl_re = re.compile(rb'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')
 blank_re = re.compile(rb'^[ \t\f]*(?:[#\r\n]|$)')
 
 def get_declaration(line):
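Note (not part of the patch): the added `break` makes the C tokenizer stop scanning
after the first coding spec found on the line, matching the Python-level regex change.
Assuming an interpreter built with the patched Lib/tokenize.py, the observable effect
could be checked with a sketch like this:

    import io
    import tokenize

    # The double-cookie line from test_double_coding_same_line.
    src = b'#coding:iso8859-15 coding:latin1\nprint(ascii("\xc3\xa4"))\n'
    encoding, _ = tokenize.detect_encoding(io.BytesIO(src).readline)
    print(encoding)  # expected: 'iso8859-15' (first cookie), not 'latin1'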