Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[3.13] gh-130794: Process interpreter QSBR queue in _PyMem_AbandonDelayed. (gh-130808) #130857

Merged
merged 1 commit into from
Mar 4, 2025
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
Fix memory leak in the :term:`free threaded <free threading>` build when
resizing a shared list or dictionary from multiple short-lived threads.
28 changes: 20 additions & 8 deletions Objects/obmalloc.c
Original file line number Diff line number Diff line change
@@ -1208,18 +1208,25 @@ process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
/* Process the interpreter's shared deferred-free queue.
 *
 * The caller must hold queue->mutex (asserted below).  Entries that are
 * safe to reclaim are handled by process_queue(); afterwards the
 * has_work flag is updated atomically so that other threads can check
 * for pending work without acquiring the mutex.
 */
static void
process_interp_queue(struct _Py_mem_interp_free_queue *queue,
                     struct _qsbr_thread_state *qsbr)
{
    assert(PyMutex_IsLocked(&queue->mutex));
    process_queue(&queue->head, qsbr, false);

    // Publish whether anything is left so readers of has_work can skip
    // taking the lock when the queue is empty.
    int more_work = !llist_empty(&queue->head);
    _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
}

/* Opportunistically process the interpreter's shared deferred-free queue.
 *
 * Fast path: if has_work is clear, return without touching the mutex.
 * Otherwise try to acquire the mutex without blocking; if another
 * thread already holds it, that thread is (or will be) processing the
 * queue, so there is nothing for us to do.
 *
 * NOTE(review): the scraped diff for this function contained both the
 * removed inline body and the added process_interp_queue() call, which
 * would process the queue twice per invocation; this is the merged
 * post-PR version, which delegates to process_interp_queue() (the
 * helper also updates has_work while the lock is held).
 */
static void
maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
                           struct _qsbr_thread_state *qsbr)
{
    if (!_Py_atomic_load_int_relaxed(&queue->has_work)) {
        return;
    }

    // Try to acquire the lock, but don't block if it's already held.
    if (_PyMutex_LockTimed(&queue->mutex, 0, 0) == PY_LOCK_ACQUIRED) {
        process_interp_queue(queue, qsbr);
        PyMutex_Unlock(&queue->mutex);
    }
}
@@ -1234,7 +1241,7 @@ _PyMem_ProcessDelayed(PyThreadState *tstate)
process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true);

// Process shared interpreter work
process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr);
maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr);
}

void
@@ -1256,10 +1263,15 @@ _PyMem_AbandonDelayed(PyThreadState *tstate)
return;
}

// Merge the thread's work queue into the interpreter's work queue.
PyMutex_Lock(&interp->mem_free_queue.mutex);

// Merge the thread's work queue into the interpreter's work queue.
llist_concat(&interp->mem_free_queue.head, queue);
_Py_atomic_store_int_relaxed(&interp->mem_free_queue.has_work, 1);

// Process the merged queue now (see gh-130794).
_PyThreadStateImpl *this_tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
process_interp_queue(&interp->mem_free_queue, this_tstate->qsbr);

PyMutex_Unlock(&interp->mem_free_queue.mutex);

assert(llist_empty(queue)); // the thread's queue is now empty
1 change: 1 addition & 0 deletions Python/qsbr.c
Original file line number Diff line number Diff line change
@@ -161,6 +161,7 @@ bool
_Py_qsbr_poll(struct _qsbr_thread_state *qsbr, uint64_t goal)
{
assert(_Py_atomic_load_int_relaxed(&_PyThreadState_GET()->state) == _Py_THREAD_ATTACHED);
assert(((_PyThreadStateImpl *)_PyThreadState_GET())->qsbr == qsbr);

if (_Py_qbsr_goal_reached(qsbr, goal)) {
return true;