diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py
index bc0404ea4b0..3c427fed561 100644
--- a/Lib/test/test_exceptions.py
+++ b/Lib/test/test_exceptions.py
@@ -210,7 +210,7 @@ def testSyntaxErrorOffset(self):
         check('x = "a', 1, 5)
         check('lambda x: x = 2', 1, 1)
         check('f{a + b + c}', 1, 2)
-        check('[file for str(file) in []\n])', 1, 11)
+        check('[file for str(file) in []\n])', 2, 2)
         check('[\nfile\nfor str(file)\nin\n[]\n]', 3, 5)
         check('[file for\n str(file) in []]', 2, 2)
diff --git a/Misc/NEWS.d/next/Core and Builtins/2021-05-04-01-01-04.bpo-43822.9VeCg0.rst b/Misc/NEWS.d/next/Core and Builtins/2021-05-04-01-01-04.bpo-43822.9VeCg0.rst
new file mode 100644
index 00000000000..b8815cddf4e
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2021-05-04-01-01-04.bpo-43822.9VeCg0.rst
@@ -0,0 +1,2 @@
+The parser will prioritize tokenizer errors over custom syntax errors when
+raising exceptions. Patch by Pablo Galindo.
diff --git a/Parser/pegen.c b/Parser/pegen.c
index e32b2710dbd..6080cec1489 100644
--- a/Parser/pegen.c
+++ b/Parser/pegen.c
@@ -1283,6 +1283,9 @@ _PyPegen_run_parser(Parser *p)
     reset_parser_state(p);
     _PyPegen_parse(p);
    if (PyErr_Occurred()) {
+        if (PyErr_ExceptionMatches(PyExc_SyntaxError)) {
+            _PyPegen_check_tokenizer_errors(p);
+        }
        return NULL;
    }
    if (p->fill == 0) {
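
Note (not part of the patch): a minimal sketch of the user-visible effect, based on the updated expectation in testSyntaxErrorOffset above. With tokenizer errors prioritized, the SyntaxError for this input is reported at line 2, offset 2 rather than line 1, offset 11; the exact error message may differ between versions.

    # Illustrative only: mirrors the check('[file for str(file) in []\n])', 2, 2)
    # assertion from the test change above.
    try:
        compile('[file for str(file) in []\n])', '<string>', 'exec')
    except SyntaxError as exc:
        # With this patch applied, the tokenizer's bracket error wins,
        # so the reported location is (2, 2).
        print(exc.lineno, exc.offset)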