Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
gh-119118: Fix performance regression in tokenize module (GH-119615)
* gh-119118: Fix performance regression in tokenize module

- Cache line object to avoid creating a Unicode object
  for all of the tokens in the same line.
- Speed up byte offset to column offset conversion by using the
  smallest buffer possible to measure the difference.

(cherry picked from commit d87b015)

Co-authored-by: Lysandros Nikolaou <lisandrosnik@gmail.com>
Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
  • Loading branch information
2 people authored and miss-islington committed May 28, 2024
commit 8e935c9e388c8dbd53f8e9e4ad3189b9fdf890eb
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
Fix performance regression in the :mod:`tokenize` module by caching the ``line``
token attribute and calculating the column offset more efficiently.
25 changes: 25 additions & 0 deletions Parser/pegen.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,31 @@ _PyPegen_interactive_exit(Parser *p)
return NULL;
}

Py_ssize_t
_PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset)
{
    /* Count the characters (code points) between byte offsets
     * [col_offset, end_col_offset) in the UTF-8 representation of `line`
     * by stepping from one UTF-8 lead byte to the next.  This avoids
     * decoding the whole line, which matters for very long lines.
     *
     * Returns the character count, or -1 with an exception set on error. */
    const char *data = PyUnicode_AsUTF8(line);
    if (data == NULL) {
        /* PyUnicode_AsUTF8 has already set an exception (e.g. the line
         * cannot be encoded as UTF-8). */
        return -1;
    }

    Py_ssize_t len = 0;
    while (col_offset < end_col_offset) {
        /* Read the lead byte as unsigned so high bytes (>= 0x80) are not
         * sign-extended before the mask tests below. */
        unsigned char ch = (unsigned char)data[col_offset];
        if (ch < 0x80) {                 /* 1-byte (ASCII) sequence */
            col_offset += 1;
        } else if ((ch & 0xe0) == 0xc0) { /* 2-byte sequence */
            col_offset += 2;
        } else if ((ch & 0xf0) == 0xe0) { /* 3-byte sequence */
            col_offset += 3;
        } else if ((ch & 0xf8) == 0xf0) { /* 4-byte sequence */
            col_offset += 4;
        } else {
            /* Continuation byte or invalid lead byte at a sequence start. */
            PyErr_SetString(PyExc_ValueError, "Invalid UTF-8 sequence");
            return -1;
        }
        len++;
    }
    return len;
}

Py_ssize_t
_PyPegen_byte_offset_to_character_offset_raw(const char* str, Py_ssize_t col_offset)
{
Expand Down
1 change: 1 addition & 0 deletions Parser/pegen.h
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,7 @@ int _PyPegen_fill_token(Parser *p);
expr_ty _PyPegen_name_token(Parser *p);
expr_ty _PyPegen_number_token(Parser *p);
void *_PyPegen_string_token(Parser *p);
Py_ssize_t _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset);
Py_ssize_t _PyPegen_byte_offset_to_character_offset(PyObject *line, Py_ssize_t col_offset);
Py_ssize_t _PyPegen_byte_offset_to_character_offset_raw(const char*, Py_ssize_t col_offset);
Py_ssize_t _PyPegen_calculate_display_width(PyObject *segment, Py_ssize_t character_offset);
Expand Down
44 changes: 40 additions & 4 deletions Python/Python-tokenize.c
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,11 @@ typedef struct
{
PyObject_HEAD struct tok_state *tok;  /* underlying C tokenizer state */
int done;  /* set once iteration has finished */

/* Needed to cache line for performance: the same Unicode line object is
 * reused for every token on that line instead of decoding it again. */
PyObject *last_line;  /* cached Unicode object for the most recently seen line */
Py_ssize_t last_lineno;  /* line number the cache corresponds to */
Py_ssize_t byte_col_offset_diff;  /* accumulated (byte offset - character offset) for the cached line */
} tokenizeriterobject;

/*[clinic input]
Expand Down Expand Up @@ -67,6 +72,11 @@ tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline,
self->tok->tok_extra_tokens = 1;
}
self->done = 0;

self->last_line = NULL;
self->byte_col_offset_diff = 0;
self->last_lineno = 0;

return (PyObject *)self;
}

Expand Down Expand Up @@ -209,7 +219,18 @@ tokenizeriter_next(tokenizeriterobject *it)
if (size >= 1 && it->tok->implicit_newline) {
size -= 1;
}
line = PyUnicode_DecodeUTF8(line_start, size, "replace");

if (it->tok->lineno != it->last_lineno) {
// Line has changed since last token, so we fetch the new line and cache it
// in the iter object.
Py_XDECREF(it->last_line);
line = PyUnicode_DecodeUTF8(line_start, size, "replace");
it->last_line = line;
it->byte_col_offset_diff = 0;
} else {
// Line hasn't changed so we reuse the cached one.
line = it->last_line;
}
}
if (line == NULL) {
Py_DECREF(str);
Expand All @@ -218,13 +239,28 @@ tokenizeriter_next(tokenizeriterobject *it)

Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
Py_ssize_t end_lineno = it->tok->lineno;
it->last_lineno = lineno;

Py_ssize_t col_offset = -1;
Py_ssize_t end_col_offset = -1;
Py_ssize_t byte_offset = -1;
if (token.start != NULL && token.start >= line_start) {
col_offset = _PyPegen_byte_offset_to_character_offset(line, token.start - line_start);
byte_offset = token.start - line_start;
col_offset = byte_offset - it->byte_col_offset_diff;
}
if (token.end != NULL && token.end >= it->tok->line_start) {
end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, token.end - it->tok->line_start);
Py_ssize_t end_byte_offset = token.end - it->tok->line_start;
if (lineno == end_lineno) {
// If the whole token is at the same line, we can just use the token.start
// buffer for figuring out the new column offset, since using line is not
// performant for very long lines.
Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset);
end_col_offset = col_offset + token_col_offset;
it->byte_col_offset_diff += token.end - token.start - token_col_offset;
} else {
end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset);
it->byte_col_offset_diff += end_byte_offset - end_col_offset;
}
}

if (it->tok->tok_extra_tokens) {
Expand Down Expand Up @@ -264,7 +300,7 @@ tokenizeriter_next(tokenizeriterobject *it)
}
}

result = Py_BuildValue("(iN(nn)(nn)N)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
result = Py_BuildValue("(iN(nn)(nn)O)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
exit:
_PyToken_Free(&token);
if (type == ENDMARKER) {
Expand Down