Commit ed1237a

Reuse flag and allow f-string check
1 parent 1c38078 commit ed1237a

9 files changed, +20 −44 lines

Include/internal/pycore_global_objects_fini_generated.h

Lines changed: 0 additions & 1 deletion
(Generated file; diff not rendered.)

Include/internal/pycore_global_strings.h

Lines changed: 0 additions & 1 deletion
@@ -463,7 +463,6 @@ struct _Py_global_strings {
         STRUCT_FOR_ID(id)
         STRUCT_FOR_ID(ident)
         STRUCT_FOR_ID(ignore)
-        STRUCT_FOR_ID(ignore_unmatched_parens)
         STRUCT_FOR_ID(imag)
         STRUCT_FOR_ID(importlib)
         STRUCT_FOR_ID(in_fd)

Include/internal/pycore_runtime_init_generated.h

Lines changed: 0 additions & 1 deletion
(Generated file; diff not rendered.)

Include/internal/pycore_unicodeobject_generated.h

Lines changed: 0 additions & 3 deletions
(Generated file; diff not rendered.)

Lib/tokenize.py

Lines changed: 3 additions & 7 deletions
@@ -447,9 +447,7 @@ def tokenize(readline):
 
 def _tokenize(rl_gen, encoding):
     source = b"".join(rl_gen).decode(encoding)
-    for token in _generate_tokens_from_c_tokenizer(source,
-                                                   extra_tokens=True,
-                                                   ignore_unmatched_parens=True):
+    for token in _generate_tokens_from_c_tokenizer(source, extra_tokens=True):
         yield token
 
 def generate_tokens(readline):
@@ -533,12 +531,10 @@ def error(message, filename=None, location=None):
         perror("unexpected error: %s" % err)
         raise
 
-def _generate_tokens_from_c_tokenizer(source, extra_tokens=False, ignore_unmatched_parens=False):
+def _generate_tokens_from_c_tokenizer(source, extra_tokens=False):
     """Tokenize a source reading Python code as unicode strings using the internal C tokenizer"""
     import _tokenize as c_tokenizer
-    for info in c_tokenizer.TokenizerIter(source,
-                                          extra_tokens=extra_tokens,
-                                          ignore_unmatched_parens=ignore_unmatched_parens):
+    for info in c_tokenizer.TokenizerIter(source, extra_tokens=extra_tokens):
         yield TokenInfo._make(info)
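
With the keyword gone, lenient tokenization is requested with a single flag. A minimal usage sketch of the private helper, assuming a build that includes this commit (internal API, not a stable interface):

    import tokenize

    # extra_tokens=True is what tokenize.tokenize() itself passes; after
    # this commit it also implies tolerance of unmatched closing brackets,
    # since the C tokenizer keys both behaviors off tok_extra_tokens.
    for tok in tokenize._generate_tokens_from_c_tokenizer(
            "a = (1, 2)\n", extra_tokens=True):
        print(tok)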

Parser/tokenizer.c

Lines changed: 7 additions & 8 deletions
@@ -113,7 +113,6 @@ tok_new(void)
     tok->report_warnings = 1;
     tok->tok_extra_tokens = 0;
     tok->comment_newline = 0;
-    tok->ignore_unmatched_parens = 0;
     tok->tok_mode_stack[0] = (tokenizer_mode){.kind =TOK_REGULAR_MODE, .f_string_quote='\0', .f_string_quote_size = 0, .f_string_debug=0};
     tok->tok_mode_stack_index = 0;
     tok->tok_report_warnings = 1;
@@ -2497,18 +2496,18 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
         case ')':
         case ']':
         case '}':
-            if (!tok->ignore_unmatched_parens && !tok->level) {
-                if (INSIDE_FSTRING(tok) && !current_tok->curly_bracket_depth && c == '}') {
-                    return MAKE_TOKEN(syntaxerror(tok, "f-string: single '}' is not allowed"));
-                }
+            if (INSIDE_FSTRING(tok) && !current_tok->curly_bracket_depth && c == '}') {
+                return MAKE_TOKEN(syntaxerror(tok, "f-string: single '}' is not allowed"));
+            }
+            if (!tok->tok_extra_tokens && !tok->level) {
                 return MAKE_TOKEN(syntaxerror(tok, "unmatched '%c'", c));
             }
             if (tok->level > 0) {
                 tok->level--;
                 int opening = tok->parenstack[tok->level];
-                if (!tok->ignore_unmatched_parens && !((opening == '(' && c == ')') ||
-                                                       (opening == '[' && c == ']') ||
-                                                       (opening == '{' && c == '}'))) {
+                if (!tok->tok_extra_tokens && !((opening == '(' && c == ')') ||
+                                                (opening == '[' && c == ']') ||
+                                                (opening == '{' && c == '}'))) {
                     /* If the opening bracket belongs to an f-string's expression
                        part (e.g. f"{)}") and the closing bracket is an arbitrary
                        nested expression, then instead of matching a different
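
The reordered checks are observable from Python: the f-string check was hoisted out of the guarded block, so it now fires even in lenient mode, while unmatched closers are only rejected in strict mode. A hedged sketch of the resulting behavior (assuming a CPython build with this commit; the exception type is assumed to be SyntaxError, as the C tokenizer reports these via syntaxerror()):

    import io
    import tokenize

    # A stray top-level '}' is an error only in strict mode; the tokenize
    # module runs with extra_tokens=True, so this succeeds and the '}'
    # comes through as an ordinary OP token.
    list(tokenize.generate_tokens(io.StringIO("x = 1}\n").readline))

    # A single '}' inside an f-string is rejected even in lenient mode.
    try:
        list(tokenize.generate_tokens(io.StringIO('f"}"\n').readline))
    except SyntaxError as exc:
        print(exc)  # f-string: single '}' is not allowed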

Parser/tokenizer.h

Lines changed: 0 additions & 1 deletion
@@ -130,7 +130,6 @@ struct tok_state {
     int tok_report_warnings;
     int tok_extra_tokens;
     int comment_newline;
-    int ignore_unmatched_parens;
 #ifdef Py_DEBUG
     int debug;
 #endif

Python/Python-tokenize.c

Lines changed: 2 additions & 9 deletions
@@ -40,13 +40,12 @@ _tokenizer.tokenizeriter.__new__ as tokenizeriter_new
     source: str
     *
     extra_tokens: bool
-    ignore_unmatched_parens: bool
 [clinic start generated code]*/
 
 static PyObject *
 tokenizeriter_new_impl(PyTypeObject *type, const char *source,
-                       int extra_tokens, int ignore_unmatched_parens)
-/*[clinic end generated code: output=5437e7bbc30de3f4 input=7f6b22d7c235ffd7]*/
+                       int extra_tokens)
+/*[clinic end generated code: output=f6f9d8b4beec8106 input=90dc5b6a5df180c2]*/
 {
     tokenizeriterobject *self = (tokenizeriterobject *)type->tp_alloc(type, 0);
     if (self == NULL) {
@@ -65,12 +64,6 @@ tokenizeriter_new_impl(PyTypeObject *type, const char *source,
     if (extra_tokens) {
         self->tok->tok_extra_tokens = 1;
     }
-    if (ignore_unmatched_parens) {
-        self->tok->ignore_unmatched_parens = 1;
-    }
-    if (ignore_unmatched_parens) {
-        self->tok->ignore_unmatched_parens = 1;
-    }
     self->done = 0;
     return (PyObject *)self;
 }
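
After this change the constructor takes only the keyword-only extra_tokens argument (note that the removed assignment block above had accidentally been duplicated; the commit cleans that up too). A sketch of calling the iterator directly, assuming the signature in the clinic block above (private module; the shape of the yielded tuples is assumed from Lib/tokenize.py, which wraps them with TokenInfo._make):

    import _tokenize

    # Strict mode: unmatched closers raise instead of being emitted.
    for info in _tokenize.TokenizerIter("a + b\n", extra_tokens=False):
        print(info)  # tuples consumable by tokenize.TokenInfo._make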

Python/clinic/Python-tokenize.c.h

Lines changed: 8 additions & 13 deletions
(Generated file; diff not rendered.)
