#ifdef Py_DEBUG
static inline void
_PyStaticObject_CheckRefcnt(PyObject *obj) {
- if (Py_REFCNT(obj) < _Py_IMMORTAL_REFCNT) {
+ if (!_Py_IsImmortal(obj)) {
fprintf(stderr, "Immortal Object has less refcnt than expected.\n");
_PyObject_Dump(obj);
}
#include "pycore_pystate.h" // _PyInterpreterState_GET()
#include "pycore_uniqueid.h" // _PyType_IncrefSlow
-
-#define _Py_IMMORTAL_REFCNT_LOOSE ((_Py_IMMORTAL_REFCNT >> 1) + 1)
-
// This value is added to `ob_ref_shared` for objects that use deferred
// reference counting so that they are not immediately deallocated when the
// non-deferred reference count drops to zero.
//
// The value is half the maximum shared refcount because the low two bits of
// `ob_ref_shared` are used for flags.
#define _Py_REF_DEFERRED (PY_SSIZE_T_MAX / 8)
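As a reviewer-oriented aside, here is a minimal standalone sketch of this bias
(SHARED_SHIFT and REF_DEFERRED are illustrative stand-ins for
_Py_REF_SHARED_SHIFT and _Py_REF_DEFERRED, not the real internals):

#include <assert.h>
#include <stdint.h>

#define SHARED_SHIFT 2                 /* low two bits of ob_ref_shared hold flags */
#define REF_DEFERRED (INTPTR_MAX / 8)  /* stand-in for _Py_REF_DEFERRED */

int main(void)
{
    /* An object that opts into deferred refcounting starts out biased. */
    intptr_t ob_ref_shared = REF_DEFERRED << SHARED_SHIFT;

    /* A plain, non-deferred DECREF removes one refcount "unit" ... */
    ob_ref_shared -= (intptr_t)1 << SHARED_SHIFT;

    /* ... but the biased count stays far above zero, so the object is
       not deallocated even when no plain references remain. */
    assert((ob_ref_shared >> SHARED_SHIFT) > 0);
    return 0;
}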
-// gh-121528, gh-118997: Similar to _Py_IsImmortal() but be more loose when
-// comparing the reference count to stay compatible with C extensions built
-// with the stable ABI 3.11 or older. Such extensions implement INCREF/DECREF
-// as refcnt++ and refcnt-- without taking in account immortal objects. For
-// example, the reference count of an immortal object can change from
-// _Py_IMMORTAL_REFCNT to _Py_IMMORTAL_REFCNT+1 (INCREF) or
-// _Py_IMMORTAL_REFCNT-1 (DECREF).
-//
-// This function should only be used in assertions. Otherwise, _Py_IsImmortal()
-// must be used instead.
-static inline int _Py_IsImmortalLoose(PyObject *op)
-{
-#if defined(Py_GIL_DISABLED)
- return _Py_IsImmortal(op);
-#else
- return (op->ob_refcnt >= _Py_IMMORTAL_REFCNT_LOOSE);
-#endif
-}
-#define _Py_IsImmortalLoose(op) _Py_IsImmortalLoose(_PyObject_CAST(op))
+/* For backwards compatibility -- Do not use this */
+#define _Py_IsImmortalLoose(op) _Py_IsImmortal(op)
/* Check if an object is consistent. For example, ensure that the reference
#else
#define _PyObject_HEAD_INIT(type) \
{ \
- .ob_refcnt = _Py_IMMORTAL_REFCNT, \
+ .ob_refcnt = _Py_IMMORTAL_INITIAL_REFCNT, \
.ob_type = (type) \
}
#endif
static inline void _Py_SetMortal(PyObject *op, Py_ssize_t refcnt)
{
if (op) {
- assert(_Py_IsImmortalLoose(op));
+ assert(_Py_IsImmortal(op));
#ifdef Py_GIL_DISABLED
op->ob_tid = _Py_UNOWNED_TID;
op->ob_ref_local = 0;
_Py_INCREF_TYPE(PyTypeObject *type)
{
if (!_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) {
- assert(_Py_IsImmortalLoose(type));
+ assert(_Py_IsImmortal(type));
_Py_INCREF_IMMORTAL_STAT_INC();
return;
}
_Py_DECREF_TYPE(PyTypeObject *type)
{
if (!_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) {
- assert(_Py_IsImmortalLoose(type));
+ assert(_Py_IsImmortal(type));
_Py_DECREF_IMMORTAL_STAT_INC();
return;
}
{
assert(op != NULL);
Py_SET_TYPE(op, typeobj);
- assert(_PyType_HasFeature(typeobj, Py_TPFLAGS_HEAPTYPE) || _Py_IsImmortalLoose(typeobj));
+ assert(_PyType_HasFeature(typeobj, Py_TPFLAGS_HEAPTYPE) || _Py_IsImmortal(typeobj));
_Py_INCREF_TYPE(typeobj);
_Py_NewReference(op);
}
#else
#define PyObject_HEAD_INIT(type) \
{ \
- { _Py_IMMORTAL_REFCNT }, \
+ { _Py_IMMORTAL_INITIAL_REFCNT }, \
(type) \
},
#endif
#if SIZEOF_VOID_P > 4
/*
-In 64+ bit systems, an object will be marked as immortal by setting all of the
-lower 32 bits of the reference count field, which is equal to: 0xFFFFFFFF
+In 64+ bit systems, any object whose 32 bit reference count is >= 2**31
+will be treated as immortal.
Using the lower 32 bits makes the value backwards compatible by allowing
C-Extensions without the updated checks in Py_INCREF and Py_DECREF to safely
-increase and decrease the objects reference count. The object would lose its
-immortality, but the execution would still be correct.
+increase and decrease the object's reference count.
+
+In order to offer sufficient resilience to C extensions using the stable ABI
+compiled against 3.11 or earlier, we set the initial value near the
+middle of the range (2**31, 2**32). That way the refcount can be
+off by ~1 billion without affecting immortality.
-Reference count increases will use saturated arithmetic, taking advantage of
-having all the lower 32 bits set, which will avoid the reference count to go
-beyond the refcount limit. Immortality checks for reference count decreases will
-be done by checking the bit sign flag in the lower 32 bits.
+Reference count increases and decreases first check the sign bit of the lower
+32 bits: if it is set, the object is immortal and the field is left unchanged.
+This also prevents the reference count from ever overflowing its 32 bit field.
*/
-#define _Py_IMMORTAL_REFCNT _Py_CAST(Py_ssize_t, UINT_MAX)
+#define _Py_IMMORTAL_INITIAL_REFCNT ((Py_ssize_t)(3UL << 30))
#else
/*
-In 32 bit systems, an object will be marked as immortal by setting all of the
-lower 30 bits of the reference count field, which is equal to: 0x3FFFFFFF
+In 32 bit systems, an object will be treated as immortal if its reference
+count equals or exceeds _Py_IMMORTAL_MINIMUM_REFCNT (2**30).
Using the lower 30 bits makes the value backwards compatible by allowing
C-Extensions without the updated checks in Py_INCREF and Py_DECREF to safely
increase and decrease the objects reference count. The object would lose its
immortality, but the execution would still be correct.
Reference count increases and decreases will first go through an immortality
-check by comparing the reference count field to the immortality reference count.
+check by comparing the reference count field to the minimum immortality refcount.
*/
-#define _Py_IMMORTAL_REFCNT _Py_CAST(Py_ssize_t, UINT_MAX >> 2)
+#define _Py_IMMORTAL_INITIAL_REFCNT ((Py_ssize_t)(3L << 29))
+#define _Py_IMMORTAL_MINIMUM_REFCNT ((Py_ssize_t)(1L << 30))
#endif
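Note for reviewers: a standalone sketch of the two checks above (is_immortal_64
and the constants are illustrative stand-ins mirroring the defines, not the
real internals), showing how much drift from old stable-ABI extensions is
tolerated:

#include <assert.h>
#include <stdint.h>

#define IMMORTAL_INITIAL_64 ((int64_t)(3UL << 30))  /* 0xC0000000 */
#define IMMORTAL_MINIMUM_32 ((int32_t)(1L << 30))   /* 2**30 */

static int is_immortal_64(int64_t refcnt)
{
    /* 64-bit check: test the sign bit of the lower 32 bits. */
    return (int32_t)(uint32_t)refcnt < 0;
}

int main(void)
{
    int64_t rc = IMMORTAL_INITIAL_64;
    assert(is_immortal_64(rc));
    /* Raw refcnt++/refcnt-- from extensions built against the 3.11 or
       older stable ABI can drift the field by ~1 billion in either
       direction without clearing the sign bit. */
    assert(is_immortal_64(rc - 1000000000));
    assert(is_immortal_64(rc + 1000000000));
    /* 32-bit check: a plain comparison against the minimum. */
    assert((int32_t)(3L << 29) >= IMMORTAL_MINIMUM_32);
    return 0;
}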
// Py_GIL_DISABLED builds indicate immortal objects using `ob_ref_local`, which is
#else
uint32_t local = _Py_atomic_load_uint32_relaxed(&ob->ob_ref_local);
if (local == _Py_IMMORTAL_REFCNT_LOCAL) {
- return _Py_IMMORTAL_REFCNT;
+ return _Py_IMMORTAL_INITIAL_REFCNT;
}
Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&ob->ob_ref_shared);
return _Py_STATIC_CAST(Py_ssize_t, local) +
return (_Py_atomic_load_uint32_relaxed(&op->ob_ref_local) ==
_Py_IMMORTAL_REFCNT_LOCAL);
#elif SIZEOF_VOID_P > 4
- return (_Py_CAST(PY_INT32_T, op->ob_refcnt) < 0);
+ return _Py_CAST(PY_INT32_T, op->ob_refcnt) < 0;
#else
- return (op->ob_refcnt == _Py_IMMORTAL_REFCNT);
+ return op->ob_refcnt >= _Py_IMMORTAL_MINIMUM_REFCNT;
#endif
}
#define _Py_IsImmortal(op) _Py_IsImmortal(_PyObject_CAST(op))
uint32_t new_local = local + 1;
if (new_local == 0) {
_Py_INCREF_IMMORTAL_STAT_INC();
- // local is equal to _Py_IMMORTAL_REFCNT: do nothing
+ // local is equal to _Py_IMMORTAL_REFCNT_LOCAL: do nothing
return;
}
if (_Py_IsOwnedByCurrentThread(op)) {
_Py_atomic_add_ssize(&op->ob_ref_shared, (1 << _Py_REF_SHARED_SHIFT));
}
#elif SIZEOF_VOID_P > 4
- // Portable saturated add, branching on the carry flag and set low bits
PY_UINT32_T cur_refcnt = op->ob_refcnt_split[PY_BIG_ENDIAN];
- PY_UINT32_T new_refcnt = cur_refcnt + 1;
- if (new_refcnt == 0) {
+ if (((int32_t)cur_refcnt) < 0) {
+ // the object is immortal
_Py_INCREF_IMMORTAL_STAT_INC();
- // cur_refcnt is equal to _Py_IMMORTAL_REFCNT: the object is immortal,
- // do nothing
return;
}
- op->ob_refcnt_split[PY_BIG_ENDIAN] = new_refcnt;
+ op->ob_refcnt_split[PY_BIG_ENDIAN] = cur_refcnt + 1;
#else
- // Explicitly check immortality against the immortal value
if (_Py_IsImmortal(op)) {
_Py_INCREF_IMMORTAL_STAT_INC();
return;
class ImmortalTests(unittest.TestCase):
if sys.maxsize < (1 << 32):
- IMMORTAL_REFCOUNT = (1 << 30) - 1
+ IMMORTAL_REFCOUNT = 3 << 29
else:
- IMMORTAL_REFCOUNT = (1 << 32) - 1
+ IMMORTAL_REFCOUNT = 3 << 30
IMMORTALS = (None, True, False, Ellipsis, NotImplemented, *range(-5, 257))
--- /dev/null
+Make the handling of reference counts of immortal objects more robust.
+Immortal objects with reference counts that deviate from their original
+reference count by up to a billion (half a billion on 32 bit builds) are
+still counted as immortal.
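A quick arithmetic check of the quoted tolerances, as a sketch against the
constants introduced above (not part of the release note itself):

#include <assert.h>

int main(void)
{
    /* 64 bit builds: initial refcnt 3<<30, immortality threshold 2**31. */
    long long margin64 = (3LL << 30) - (1LL << 31);  /* 1073741824 */
    /* 32 bit builds: initial refcnt 3<<29, threshold 2**30. */
    long long margin32 = (3LL << 29) - (1LL << 30);  /* 536870912 */
    assert(margin64 == (1LL << 30));  /* ~1 billion */
    assert(margin32 == (1LL << 29));  /* ~half a billion */
    return 0;
}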
default:
assert (0);
}
- assert(_Py_IsImmortalLoose(ret));
+ assert(_Py_IsImmortal(ret));
return ret;
}
static inline PyObject* bytes_get_empty(void)
{
PyObject *empty = &EMPTY->ob_base.ob_base;
- assert(_Py_IsImmortalLoose(empty));
+ assert(_Py_IsImmortal(empty));
return empty;
}
}
if (size == 1 && str != NULL) {
op = CHARACTER(*str & 255);
- assert(_Py_IsImmortalLoose(op));
+ assert(_Py_IsImmortal(op));
return (PyObject *)op;
}
if (size == 0) {
}
else if (size == 1) {
op = CHARACTER(*str & 255);
- assert(_Py_IsImmortalLoose(op));
+ assert(_Py_IsImmortal(op));
return (PyObject *)op;
}
#define DK_MASK(dk) (DK_SIZE(dk)-1)
+#define _Py_DICT_IMMORTAL_INITIAL_REFCNT PY_SSIZE_T_MIN
+
static void free_keys_object(PyDictKeysObject *keys, bool use_qsbr);
/* PyDictKeysObject has refcounts like PyObject does, so we have the
static inline void
dictkeys_incref(PyDictKeysObject *dk)
{
- if (FT_ATOMIC_LOAD_SSIZE_RELAXED(dk->dk_refcnt) == _Py_IMMORTAL_REFCNT) {
+ if (FT_ATOMIC_LOAD_SSIZE_RELAXED(dk->dk_refcnt) < 0) {
+ assert(FT_ATOMIC_LOAD_SSIZE_RELAXED(dk->dk_refcnt) == _Py_DICT_IMMORTAL_INITIAL_REFCNT);
return;
}
#ifdef Py_REF_DEBUG
static inline void
dictkeys_decref(PyInterpreterState *interp, PyDictKeysObject *dk, bool use_qsbr)
{
- if (FT_ATOMIC_LOAD_SSIZE_RELAXED(dk->dk_refcnt) == _Py_IMMORTAL_REFCNT) {
+ if (FT_ATOMIC_LOAD_SSIZE_RELAXED(dk->dk_refcnt) < 0) {
+ assert(FT_ATOMIC_LOAD_SSIZE_RELAXED(dk->dk_refcnt) == _Py_DICT_IMMORTAL_INITIAL_REFCNT);
return;
}
assert(FT_ATOMIC_LOAD_SSIZE(dk->dk_refcnt) > 0);
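Note for reviewers: a minimal sketch of the dict-keys variant (DictKeys and
keys_incref are hypothetical stand-ins, not the real internals). PY_SSIZE_T_MIN
has the sign bit set, so the cheap dk_refcnt < 0 test identifies the immortal
keys object, while the assert documents that the value can never drift because,
unlike PyObject refcounts, this field is never touched by extension modules:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

typedef struct { ptrdiff_t dk_refcnt; } DictKeys;  /* stand-in */
#define DICT_IMMORTAL_INITIAL_REFCNT PTRDIFF_MIN   /* stand-in */

static void keys_incref(DictKeys *dk)
{
    if (dk->dk_refcnt < 0) {
        /* Immortal: the field must still hold the exact sentinel. */
        assert(dk->dk_refcnt == DICT_IMMORTAL_INITIAL_REFCNT);
        return;
    }
    dk->dk_refcnt++;
}

int main(void)
{
    DictKeys empty = { DICT_IMMORTAL_INITIAL_REFCNT };
    keys_incref(&empty);  /* no-op: the keys object stays immortal */
    assert(empty.dk_refcnt == DICT_IMMORTAL_INITIAL_REFCNT);
    return 0;
}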
* (which cannot fail and thus can do no allocation).
*/
static PyDictKeysObject empty_keys_struct = {
- _Py_IMMORTAL_REFCNT, /* dk_refcnt */
+ _Py_DICT_IMMORTAL_INITIAL_REFCNT, /* dk_refcnt */
0, /* dk_log2_size */
0, /* dk_log2_index_bytes */
DICT_KEYS_UNICODE, /* dk_kind */
op->ob_ref_local = _Py_IMMORTAL_REFCNT_LOCAL;
op->ob_ref_shared = 0;
#else
- op->ob_refcnt = _Py_IMMORTAL_REFCNT;
+ op->ob_refcnt = _Py_IMMORTAL_INITIAL_REFCNT;
#endif
}
assert(type->tp_name != NULL);
assert(type->tp_base == &PyTuple_Type);
assert((type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN));
- assert(_Py_IsImmortalLoose(type));
+ assert(_Py_IsImmortal(type));
// Cannot delete a type if it still has subclasses
if (_PyType_HasSubclasses(type)) {
assert(PyTuple_GET_SIZE(bases) == 1);
assert(PyTuple_GET_ITEM(bases, 0) == (PyObject *)self->tp_base);
assert(self->tp_base->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN);
- assert(_Py_IsImmortalLoose(self->tp_base));
+ assert(_Py_IsImmortal(self->tp_base));
}
_Py_SetImmortal(bases);
}
Py_CLEAR(self->tp_bases);
}
else {
- assert(_Py_IsImmortalLoose(self->tp_bases));
+ assert(_Py_IsImmortal(self->tp_bases));
_Py_ClearImmortal(self->tp_bases);
}
}
Py_CLEAR(self->tp_mro);
}
else {
- assert(_Py_IsImmortalLoose(self->tp_mro));
+ assert(_Py_IsImmortal(self->tp_mro));
_Py_ClearImmortal(self->tp_mro);
}
}
int isbuiltin, int final)
{
assert(type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN);
- assert(_Py_IsImmortalLoose((PyObject *)type));
+ assert(_Py_IsImmortal((PyObject *)type));
type_dealloc_common(type);
EXIT_IF(!PyLong_CheckExact(value_o));
STAT_INC(TO_BOOL, hit);
if (_PyLong_IsZero((PyLongObject *)value_o)) {
- assert(_Py_IsImmortalLoose(value_o));
+ assert(_Py_IsImmortal(value_o));
DEAD(value);
res = PyStackRef_False;
}
EXIT_IF(!PyUnicode_CheckExact(value_o));
STAT_INC(TO_BOOL, hit);
if (value_o == &_Py_STR(empty)) {
- assert(_Py_IsImmortalLoose(value_o));
+ assert(_Py_IsImmortal(value_o));
DEAD(value);
res = PyStackRef_False;
}
}
STAT_INC(TO_BOOL, hit);
if (_PyLong_IsZero((PyLongObject *)value_o)) {
- assert(_Py_IsImmortalLoose(value_o));
+ assert(_Py_IsImmortal(value_o));
res = PyStackRef_False;
}
else {
}
STAT_INC(TO_BOOL, hit);
if (value_o == &_Py_STR(empty)) {
- assert(_Py_IsImmortalLoose(value_o));
+ assert(_Py_IsImmortal(value_o));
res = PyStackRef_False;
}
else {
DEOPT_IF(!PyLong_CheckExact(value_o), TO_BOOL);
STAT_INC(TO_BOOL, hit);
if (_PyLong_IsZero((PyLongObject *)value_o)) {
- assert(_Py_IsImmortalLoose(value_o));
+ assert(_Py_IsImmortal(value_o));
res = PyStackRef_False;
}
else {
DEOPT_IF(!PyUnicode_CheckExact(value_o), TO_BOOL);
STAT_INC(TO_BOOL, hit);
if (value_o == &_Py_STR(empty)) {
- assert(_Py_IsImmortalLoose(value_o));
+ assert(_Py_IsImmortal(value_o));
res = PyStackRef_False;
}
else {
However, this decref would be problematic if the module def were
dynamically allocated, it were the last ref, and this function
were called with an interpreter other than the def's owner. */
- assert(value->def == NULL || _Py_IsImmortalLoose(value->def));
+ assert(value->def == NULL || _Py_IsImmortal(value->def));
Py_XDECREF(value->def->m_base.m_copy);
value->def->m_base.m_copy = NULL;
"_Py_EnterRecursiveCallTstateUnchecked",
"_Py_ID",
"_Py_IsImmortal",
- "_Py_IsImmortalLoose",
"_Py_LeaveRecursiveCallPy",
"_Py_LeaveRecursiveCallTstate",
"_Py_NewRef",