Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
gh-112243: Don't include comments in f-string debug expressions
  • Loading branch information
pablogsal committed Nov 20, 2023
commit f204fbd88a2ea153461f302fb7340fa84c7a2ea5
3 changes: 3 additions & 0 deletions Lib/test/test_fstring.py
Original file line number Diff line number Diff line change
Expand Up @@ -1627,6 +1627,9 @@ def __repr__(self):
self.assertEqual(f'X{x = }Y', 'Xx = '+repr(x)+'Y')
self.assertEqual(f"sadsd {1 + 1 = :{1 + 1:1d}f}", "sadsd 1 + 1 = 2.000000")

self.assertEqual(f"{1+2 = # my comment
}", '1+2 = \n 3')

# These next lines contains tabs. Backslash escapes don't
# work in f-strings.
# patchcheck doesn't like these tabs. So the only way to test
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Don't include comments in f-string debug expressions. Patch by Pablo Galindo.
55 changes: 49 additions & 6 deletions Parser/lexer/lexer.c
Original file line number Diff line number Diff line change
Expand Up @@ -112,13 +112,56 @@ set_fstring_expr(struct tok_state* tok, struct token *token, char c) {
if (!tok_mode->f_string_debug || token->metadata) {
    return 0;
}

PyObject *res = NULL;

// Length of the expression text inside last_expr_buffer.
Py_ssize_t input_length = tok_mode->last_expr_size - tok_mode->last_expr_end;

// Check if there is a # character in the expression.
// Must be initialized to 0: the scan below may find no '#', in which
// case the original code read an uninitialized value (undefined behavior).
int hash_detected = 0;
for (Py_ssize_t i = 0; i < input_length; i++) {
    if (tok_mode->last_expr_buffer[i] == '#') {
        hash_detected = 1;
        break;
    }
}

if (hash_detected) {
    // Build a copy of the expression with comments stripped. Stripping
    // only removes characters, so input_length + 1 bytes always suffice.
    char *result = PyObject_Malloc(input_length + 1);
    if (!result) {
        return -1;
    }

    Py_ssize_t j = 0;
    for (Py_ssize_t i = 0; i < input_length; i++) {
        if (tok_mode->last_expr_buffer[i] == '#') {
            // Skip characters until newline or end of string. The newline
            // itself is kept so the expression's line structure survives.
            // NOTE(review): assumes last_expr_buffer is NUL-terminated --
            // confirm against the tokenizer's buffer invariants.
            while (tok_mode->last_expr_buffer[i] != '\0') {
                if (tok_mode->last_expr_buffer[i] == '\n') {
                    result[j++] = tok_mode->last_expr_buffer[i];
                    break;
                }
                i++;
            }
        }
        else {
            result[j++] = tok_mode->last_expr_buffer[i];
        }
    }

    result[j] = '\0'; // Null-terminate the result string
    res = PyUnicode_DecodeUTF8(result, j, NULL);
    PyObject_Free(result);
}
else {
    // No comment present: decode the expression text as-is.
    res = PyUnicode_DecodeUTF8(tok_mode->last_expr_buffer, input_length, NULL);
}

if (!res) {
    return -1;
}
token->metadata = res;
Expand Down