Commit baba972

[3.13] pythongh-130794: Process interpreter QSBR queue in _PyMem_AbandonDelayed. (pythongh-130808)
This avoids a case where the interpreter's queue of memory to be freed could grow rapidly if there are many short-lived threads.

(cherry picked from commit 2f6e0e9)

Co-authored-by: Sam Gross <[email protected]>
Parent: 22d729c
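
As a rough, hypothetical model of the failure mode (plain C, not CPython's data structures; node, merge, and drain are made-up names): each short-lived worker defers some frees and merges them into a shared queue when it exits. If the exit path only flags the shared queue as having work and leaves the draining to some later caller, the queue grows with every exiting worker; draining it at merge time, as this commit does in _PyMem_AbandonDelayed, keeps it bounded.

/* Hypothetical model, not CPython code: workers build a local list of
 * deferred frees and merge it into a shared queue when they exit. */
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *shared_queue = NULL;   /* interpreter-wide queue */
static size_t shared_len = 0;

/* Merge a worker's local list into the shared queue (the "abandon" step). */
static void merge(struct node *local)
{
    while (local != NULL) {
        struct node *next = local->next;
        local->next = shared_queue;
        shared_queue = local;
        shared_len++;
        local = next;
    }
}

/* Drain the shared queue; in this toy model everything is safe to free. */
static void drain(void)
{
    while (shared_queue != NULL) {
        struct node *next = shared_queue->next;
        free(shared_queue);
        shared_queue = next;
        shared_len--;
    }
}

int main(void)
{
    const int workers = 1000, per_worker = 100;
    for (int w = 0; w < workers; w++) {
        /* Each short-lived "thread" defers a batch of frees... */
        struct node *local = NULL;
        for (int i = 0; i < per_worker; i++) {
            struct node *n = malloc(sizeof(*n));
            n->next = local;
            local = n;
        }
        /* ...and hands them to the shared queue when it exits. */
        merge(local);
        /* Without this drain the queue reaches workers * per_worker
         * entries; draining at merge time keeps it near zero. */
        drain();
    }
    printf("outstanding deferred frees: %zu\n", shared_len);
    return 0;
}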

3 files changed, 23 insertions(+), 8 deletions(-)
@@ -0,0 +1,2 @@
+Fix memory leak in the :term:`free threaded <free threading>` build when
+resizing a shared list or dictionary from multiple short-lived threads.

Objects/obmalloc.c (+20 -8)
@@ -1208,18 +1208,25 @@ process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
 static void
 process_interp_queue(struct _Py_mem_interp_free_queue *queue,
                      struct _qsbr_thread_state *qsbr)
+{
+    assert(PyMutex_IsLocked(&queue->mutex));
+    process_queue(&queue->head, qsbr, false);
+
+    int more_work = !llist_empty(&queue->head);
+    _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
+}
+
+static void
+maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
+                           struct _qsbr_thread_state *qsbr)
 {
     if (!_Py_atomic_load_int_relaxed(&queue->has_work)) {
         return;
     }
 
     // Try to acquire the lock, but don't block if it's already held.
     if (_PyMutex_LockTimed(&queue->mutex, 0, 0) == PY_LOCK_ACQUIRED) {
-        process_queue(&queue->head, qsbr, false);
-
-        int more_work = !llist_empty(&queue->head);
-        _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
-
+        process_interp_queue(queue, qsbr);
         PyMutex_Unlock(&queue->mutex);
     }
 }
@@ -1234,7 +1241,7 @@ _PyMem_ProcessDelayed(PyThreadState *tstate)
     process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true);
 
     // Process shared interpreter work
-    process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr);
+    maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr);
 }
 
 void
@@ -1256,10 +1263,15 @@ _PyMem_AbandonDelayed(PyThreadState *tstate)
         return;
     }
 
-    // Merge the thread's work queue into the interpreter's work queue.
     PyMutex_Lock(&interp->mem_free_queue.mutex);
+
+    // Merge the thread's work queue into the interpreter's work queue.
     llist_concat(&interp->mem_free_queue.head, queue);
-    _Py_atomic_store_int_relaxed(&interp->mem_free_queue.has_work, 1);
+
+    // Process the merged queue now (see gh-130794).
+    _PyThreadStateImpl *this_tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
+    process_interp_queue(&interp->mem_free_queue, this_tstate->qsbr);
+
     PyMutex_Unlock(&interp->mem_free_queue.mutex);
 
     assert(llist_empty(queue));  // the thread's queue is now empty
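
The diff above splits the old process_interp_queue into an inner helper that requires the caller to hold the queue mutex, plus a maybe_process_interp_queue wrapper that only does the work if the lock can be taken without blocking. _PyMem_AbandonDelayed already takes the mutex to merge the exiting thread's queue, so it can call the lock-held helper directly. A rough standalone sketch of that shape, with made-up names and a plain pthread mutex rather than CPython's PyMutex:

/* Illustrative only; the names and types here are not CPython's. */
#include <pthread.h>

struct work_queue {
    pthread_mutex_t mutex;
    int pending;                /* stand-in for the list of deferred frees */
};

/* Inner helper: caller must already hold q->mutex. */
static void process_locked(struct work_queue *q)
{
    q->pending = 0;             /* "free" everything that is safe to free */
}

/* Fast path: skip the work entirely if someone else holds the lock. */
static void maybe_process(struct work_queue *q)
{
    if (pthread_mutex_trylock(&q->mutex) == 0) {
        process_locked(q);
        pthread_mutex_unlock(&q->mutex);
    }
}

/* Exit path: the lock is taken anyway to merge, so drain while holding it. */
static void merge_and_process(struct work_queue *q, int new_items)
{
    pthread_mutex_lock(&q->mutex);
    q->pending += new_items;    /* merge the thread's deferred frees */
    process_locked(q);          /* then process the merged queue right away */
    pthread_mutex_unlock(&q->mutex);
}

int main(void)
{
    struct work_queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };
    merge_and_process(&q, 5);   /* like _PyMem_AbandonDelayed on thread exit */
    maybe_process(&q);          /* like the periodic _PyMem_ProcessDelayed path */
    return q.pending;           /* 0: nothing is left queued */
}

This toy version omits the has_work flag; in the real patch, process_interp_queue recomputes has_work from llist_empty on every call, so both call sites keep that flag consistent with the queue contents.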

Python/qsbr.c (+1)
@@ -161,6 +161,7 @@ bool
 _Py_qsbr_poll(struct _qsbr_thread_state *qsbr, uint64_t goal)
 {
     assert(_Py_atomic_load_int_relaxed(&_PyThreadState_GET()->state) == _Py_THREAD_ATTACHED);
+    assert(((_PyThreadStateImpl *)_PyThreadState_GET())->qsbr == qsbr);
 
     if (_Py_qbsr_goal_reached(qsbr, goal)) {
         return true;
