@@ -159,6 +159,15 @@ gc_decref(PyObject *op)
     op->ob_tid -= 1;
 }
 
+static void
+disable_deferred_refcounting(PyObject *op)
+{
+    if (_PyObject_HasDeferredRefcount(op)) {
+        op->ob_gc_bits &= ~_PyGC_BITS_DEFERRED;
+        op->ob_ref_shared -= (1 << _Py_REF_SHARED_SHIFT);
+    }
+}
+
 static Py_ssize_t
 merge_refcount(PyObject *op, Py_ssize_t extra)
 {
@@ -375,9 +384,10 @@ update_refs(const mi_heap_t *heap, const mi_heap_area_t *area,
     }
 
     Py_ssize_t refcount = Py_REFCNT(op);
+    refcount -= _PyObject_HasDeferredRefcount(op);
     _PyObject_ASSERT(op, refcount >= 0);
 
-    if (refcount > 0) {
+    if (refcount > 0 && !_PyObject_HasDeferredRefcount(op)) {
         // Untrack tuples and dicts as necessary in this pass, but not objects
         // with zero refcount, which we will want to collect.
         if (PyTuple_CheckExact(op)) {
@@ -466,6 +476,9 @@ mark_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
         return true;
     }
 
+    _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(op) >= 0,
+                              "refcount is too small");
+
     if (gc_is_unreachable(op) && gc_get_refs(op) != 0) {
         // Object is reachable but currently marked as unreachable.
         // Mark it as reachable and traverse its pointers to find
@@ -499,6 +512,10 @@ scan_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
 
     struct collection_state *state = (struct collection_state *)args;
     if (gc_is_unreachable(op)) {
+        // Disable deferred refcounting for unreachable objects so that they
+        // are collected immediately after finalization.
+        disable_deferred_refcounting(op);
+
         // Merge and add one to the refcount to prevent deallocation while we
         // are holding on to it in a worklist.
         merge_refcount(op, 1);
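
The helper added in the first hunk clears the deferred-refcount bit and removes the deferred contribution from the shared refcount, so an unreachable object no longer looks artificially alive. Below is a minimal standalone sketch of that pattern; the `toy_*` names, the struct layout, and the `GC_BITS_DEFERRED` / `REF_SHARED_SHIFT` constants are simplified stand-ins invented for illustration (in CPython the real fields live on `PyObject` in the free-threaded build), not the actual implementation.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the free-threaded object header fields.
 * The constants below are assumed values for illustration only. */
#define GC_BITS_DEFERRED   (1 << 1)   /* stand-in for _PyGC_BITS_DEFERRED */
#define REF_SHARED_SHIFT   2          /* stand-in for _Py_REF_SHARED_SHIFT */

typedef struct {
    uint8_t  ob_gc_bits;     /* per-object GC flag bits */
    intptr_t ob_ref_shared;  /* shared refcount, shifted by REF_SHARED_SHIFT */
} toy_object;

static bool
toy_has_deferred_refcount(toy_object *op)
{
    return (op->ob_gc_bits & GC_BITS_DEFERRED) != 0;
}

/* Mirrors the shape of the new disable_deferred_refcounting() helper:
 * clear the deferred bit and drop the one deferred reference that was
 * pre-added to the shared refcount. */
static void
toy_disable_deferred_refcounting(toy_object *op)
{
    if (toy_has_deferred_refcount(op)) {
        op->ob_gc_bits &= ~GC_BITS_DEFERRED;
        op->ob_ref_shared -= (intptr_t)1 << REF_SHARED_SHIFT;
    }
}

int
main(void)
{
    /* Object with deferred refcounting enabled: the deferred reference
     * is already baked into ob_ref_shared. */
    toy_object op = {
        .ob_gc_bits = GC_BITS_DEFERRED,
        .ob_ref_shared = (intptr_t)1 << REF_SHARED_SHIFT,
    };

    toy_disable_deferred_refcounting(&op);

    /* After disabling, the shared refcount no longer counts the deferred
     * reference, so an unreachable object can be freed right away. */
    printf("deferred=%d shared=%ld\n",
           toy_has_deferred_refcount(&op), (long)op.ob_ref_shared);
    return 0;
}

Subtracting the deferred contribution is what lets scan_heap_visitor collect these objects immediately after finalization instead of keeping them alive until the next cycle.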