## Immortality and reference counting
-Invariant: Every immortal string is interned.
+In the GIL-enabled build, interned strings may be mortal or immortal. In the
+free-threaded build, interned strings are always immortal.
-In practice, this means that you must not use `_Py_SetImmortal` on
-a string. (If you know it's already immortal, don't immortalize it;
-if you know it's not interned you might be immortalizing a redundant copy;
-if it's interned and mortal it needs extra processing in
-`_PyUnicode_InternImmortal`.)
-
-The converse is not true: interned strings can be mortal.
For mortal interned strings:
- the 2 references from the interned dict (key & value) are excluded from
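To make the possible states concrete, here is a minimal sketch using the C API
(error handling elided; the `SSTATE_*` constants come from CPython's internal
headers, so this assumes an internal build):

```c
/* Intern a fresh string, then inspect its interning state. */
PyObject *s = PyUnicode_FromString("spam");
PyUnicode_InternInPlace(&s);   /* replaces s with the canonical interned copy */

switch (PyUnicode_CHECK_INTERNED(s)) {
case SSTATE_INTERNED_MORTAL:           /* possible in the GIL-enabled build */
case SSTATE_INTERNED_IMMORTAL:         /* always the case when free-threaded */
case SSTATE_INTERNED_IMMORTAL_STATIC:  /* statically allocated strings */
    break;
case SSTATE_NOT_INTERNED:              /* interning is best-effort and can fail */
    break;
}
Py_DECREF(s);   /* safe whether s ended up mortal or immortal */
```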
--- /dev/null
+Fix free-threading scaling bottleneck in :func:`sys.intern` and
+:c:func:`PyObject_SetAttr` by avoiding the interpreter-wide lock when the string
+is already interned and immortalized.
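`PyObject_SetAttr` interns the attribute name it is given, which is why it
appears here alongside `sys.intern`. A sketch of the call pattern that
benefits (`obj` stands in for any object; error handling elided):

```c
/* The first call interns "attr"; once the canonical copy is immortal,
 * later calls can find it without taking the interpreter-wide intern
 * lock. Illustrative only: obj is assumed to exist. */
PyObject *name = PyUnicode_FromString("attr");
for (int i = 0; i < 1000; i++) {
    if (PyObject_SetAttr(obj, name, Py_None) < 0) {
        break;  /* real code would propagate the error */
    }
}
Py_DECREF(name);
```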
}
Py_INCREF(name);
- Py_INCREF(tp);
+ _Py_INCREF_TYPE(tp);
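+ // _Py_INCREF_TYPE pairs with _Py_DECREF_TYPE below: in free-threaded
+ // builds it avoids contending on the type object's shared reference
+ // count (it is plain Py_INCREF/Py_DECREF in the GIL-enabled build).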
PyThreadState *tstate = _PyThreadState_GET();
_PyCStackRef cref;
}
done:
_PyThreadState_PopCStackRef(tstate, &cref);
- Py_DECREF(tp);
+ _Py_DECREF_TYPE(tp);
Py_DECREF(name);
return res;
}
void
_Py_SetImmortalUntracked(PyObject *op)
{
-#ifdef Py_DEBUG
- // For strings, use _PyUnicode_InternImmortal instead.
- if (PyUnicode_CheckExact(op)) {
- assert(PyUnicode_CHECK_INTERNED(op) == SSTATE_INTERNED_IMMORTAL
- || PyUnicode_CHECK_INTERNED(op) == SSTATE_INTERNED_IMMORTAL_STATIC);
- }
-#endif
// Check if already immortal to avoid degrading from static immortal to plain immortal
if (_Py_IsImmortal(op)) {
return;
_Py_DecRefTotal(_PyThreadState_GET());
}
#endif
- FT_ATOMIC_STORE_UINT8_RELAXED(_PyUnicode_STATE(s).interned, SSTATE_INTERNED_IMMORTAL);
_Py_SetImmortal(s);
+ // The switch to SSTATE_INTERNED_IMMORTAL must be the last thing done here
+ // to synchronize with the check in intern_common() that avoids locking if
+ // the string is already immortal.
+ FT_ATOMIC_STORE_UINT8(_PyUnicode_STATE(s).interned, SSTATE_INTERNED_IMMORTAL);
}
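The new comment captures a publication pattern: every write that makes the
string immortal must become visible before the state flips, which is why the
relaxed store was replaced. A generic C11 sketch of the same writer/reader
pairing (names here are illustrative, not CPython's):

```c
#include <stdatomic.h>

struct thing { int payload; atomic_uchar state; };

/* Writer: finish all initialization writes, then flip the state last,
 * with (at least) release ordering. */
void publish(struct thing *t) {
    t->payload = 42;
    atomic_store_explicit(&t->state, 1, memory_order_release);
}

/* Reader: an acquire load of the state guarantees the payload write is
 * visible; otherwise the caller must fall back to a slower path. */
int try_consume(struct thing *t) {
    if (atomic_load_explicit(&t->state, memory_order_acquire) == 1) {
        return t->payload;   /* guaranteed to read 42 */
    }
    return -1;               /* not published yet */
}
```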
static /* non-null */ PyObject*
assert(interned != NULL);
#ifdef Py_GIL_DISABLED
# define INTERN_MUTEX &_Py_INTERP_CACHED_OBJECT(interp, interned_mutex)
+ // Lock-free fast path: check if there's already an interned copy that
+ // is in its final immortal state.
+ PyObject *r;
+ int res = PyDict_GetItemRef(interned, s, &r);
+ if (res < 0) {
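+ // Lookup failed (e.g. under memory pressure): interning is best-effort,
+ // so swallow the error and return the string un-interned.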
+ PyErr_Clear();
+ return s;
+ }
+ if (res > 0) {
+ unsigned int state = _Py_atomic_load_uint8(&_PyUnicode_STATE(r).interned);
+ if (state == SSTATE_INTERNED_IMMORTAL) {
+ Py_DECREF(s);
+ return r;
+ }
+ // Not yet fully interned; fall through to the locking path.
+ Py_DECREF(r);
+ }
#endif
FT_MUTEX_LOCK(INTERN_MUTEX);
PyObject *t;
Py_DECREF(s);
Py_DECREF(s);
}
- FT_ATOMIC_STORE_UINT8_RELAXED(_PyUnicode_STATE(s).interned, SSTATE_INTERNED_MORTAL);
+ FT_ATOMIC_STORE_UINT8(_PyUnicode_STATE(s).interned, SSTATE_INTERNED_MORTAL);
/* INTERNED_MORTAL -> INTERNED_IMMORTAL (if needed) */
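Taken together, `intern_common` now has the classic double-checked locking
shape: an optimistic lock-free lookup, then a locked slow path that re-checks
before inserting. A self-contained C11 sketch of that structure (illustrative
names, not CPython's code):

```c
#include <stdatomic.h>
#include <threads.h>

static mtx_t lock;              /* assume mtx_init(&lock, mtx_plain) at startup */
static atomic_int published;
static int cached;

int get_or_create(void) {
    /* Fast path: skip the lock once the value is in its final state. */
    if (atomic_load_explicit(&published, memory_order_acquire)) {
        return cached;
    }
    /* Slow path: lock and re-check, since another thread may have won. */
    mtx_lock(&lock);
    if (!atomic_load_explicit(&published, memory_order_relaxed)) {
        cached = 42;            /* one-time initialization */
        atomic_store_explicit(&published, 1, memory_order_release);
    }
    mtx_unlock(&lock);
    return cached;
}
```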
for i in range(40 * WORK_SCALE):
copy.deepcopy(x)
+@register_benchmark
+def setattr_non_interned():
+ prefix = "prefix"
+ obj = MyObject()
+ for _ in range(1000 * WORK_SCALE):
+ setattr(obj, f"{prefix}_a", None)
+ setattr(obj, f"{prefix}_b", None)
+ setattr(obj, f"{prefix}_c", None)
+
def bench_one_thread(func):
t0 = time.perf_counter_ns()