@@ -1208,18 +1208,25 @@ process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
1208
1208
// Drain the interpreter-wide deferred-free queue.
//
// Contract: the caller MUST already hold queue->mutex (asserted below);
// this function does not lock or unlock it. After processing, the
// has_work flag is refreshed so that other threads' lock-free fast path
// (see maybe_process_interp_queue) stays accurate.
static void
process_interp_queue(struct _Py_mem_interp_free_queue *queue,
                     struct _qsbr_thread_state *qsbr)
{
    assert(PyMutex_IsLocked(&queue->mutex));
    // Free entries whose QSBR goal has been reached. The 'false' flag's
    // exact meaning is defined by process_queue (declared earlier in this
    // file) — presumably "do not force-free remaining entries"; confirm
    // against its definition.
    process_queue(&queue->head, qsbr, false);

    // Entries not yet safe to free remain linked; record whether any work
    // is left. Relaxed ordering suffices: the flag is only an optimization
    // hint, and we are inside the mutex here.
    int more_work = !llist_empty(&queue->head);
    _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
}
1218
+
1219
// Opportunistically drain the interpreter-wide deferred-free queue.
//
// Fast path: a relaxed atomic load of has_work avoids touching the mutex
// at all when the queue is (probably) empty. If there appears to be work,
// try to take the mutex without blocking; if another thread already holds
// it, that thread is (or will be) processing the queue, so we can simply
// skip it this time.
static void
maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
                           struct _qsbr_thread_state *qsbr)
{
    if (!_Py_atomic_load_int_relaxed(&queue->has_work)) {
        return;
    }

    // Try to acquire the lock, but don't block if it's already held.
    if (_PyMutex_LockTimed(&queue->mutex, 0, 0) == PY_LOCK_ACQUIRED) {
        // process_interp_queue requires the mutex to be held; it also
        // refreshes has_work before we release the lock.
        process_interp_queue(queue, qsbr);
        PyMutex_Unlock(&queue->mutex);
    }
}
@@ -1234,7 +1241,7 @@ _PyMem_ProcessDelayed(PyThreadState *tstate)
1234
1241
process_queue (& tstate_impl -> mem_free_queue , tstate_impl -> qsbr , true);
1235
1242
1236
1243
// Process shared interpreter work
1237
- process_interp_queue (& interp -> mem_free_queue , tstate_impl -> qsbr );
1244
+ maybe_process_interp_queue (& interp -> mem_free_queue , tstate_impl -> qsbr );
1238
1245
}
1239
1246
1240
1247
void
@@ -1256,10 +1263,15 @@ _PyMem_AbandonDelayed(PyThreadState *tstate)
1256
1263
return ;
1257
1264
}
1258
1265
1259
- // Merge the thread's work queue into the interpreter's work queue.
1260
1266
PyMutex_Lock (& interp -> mem_free_queue .mutex );
1267
+
1268
+ // Merge the thread's work queue into the interpreter's work queue.
1261
1269
llist_concat (& interp -> mem_free_queue .head , queue );
1262
- _Py_atomic_store_int_relaxed (& interp -> mem_free_queue .has_work , 1 );
1270
+
1271
+ // Process the merged queue now (see gh-130794).
1272
+ _PyThreadStateImpl * this_tstate = (_PyThreadStateImpl * )_PyThreadState_GET ();
1273
+ process_interp_queue (& interp -> mem_free_queue , this_tstate -> qsbr );
1274
+
1263
1275
PyMutex_Unlock (& interp -> mem_free_queue .mutex );
1264
1276
1265
1277
assert (llist_empty (queue )); // the thread's queue is now empty
0 commit comments