/* Returns the cached hash, or -1 if not cached yet. */
static inline Py_hash_t
PyUnstable_Unicode_GET_CACHED_HASH(PyObject *op) {
    assert(PyUnicode_Check(op));
#ifdef Py_GIL_DISABLED
    return _Py_atomic_load_ssize_relaxed(&_PyASCIIObject_CAST(op)->hash);
#else
    return _PyASCIIObject_CAST(op)->hash;
#endif
}
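The accessor only reads the hash that a previous hashing operation stored on the string; it never computes one, and on free-threaded (Py_GIL_DISABLED) builds it performs the relaxed atomic load itself, so callers no longer need the internal FT_ATOMIC_* macros. A minimal usage sketch, assuming a CPython build that declares this unstable API; the helper name demo_cached_hash is invented for illustration:

#include <Python.h>
#include <assert.h>

/* Hypothetical helper, not part of the patch above. */
static void
demo_cached_hash(void)
{
    PyObject *s = PyUnicode_FromString("spam");
    if (s == NULL) {
        return;
    }

    /* A freshly created string has not been hashed yet, so -1 is reported. */
    assert(PyUnstable_Unicode_GET_CACHED_HASH(s) == -1);

    /* PyObject_Hash() computes the hash and stores it on the object... */
    Py_hash_t computed = PyObject_Hash(s);

    /* ...so the cached value can now be read back without recomputation. */
    assert(PyUnstable_Unicode_GET_CACHED_HASH(s) == computed);

    Py_DECREF(s);
}

The internal call sites below are then switched over to the new accessor.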
static inline Py_hash_t
_PyObject_HashFast(PyObject *op)
{
    if (PyUnicode_CheckExact(op)) {
-        Py_hash_t hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(
-                _PyASCIIObject_CAST(op)->hash);
+        Py_hash_t hash = PyUnstable_Unicode_GET_CACHED_HASH(op);
        if (hash != -1) {
            return hash;
        }
    }
    return PyObject_Hash(op);
}
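The same fast path can now be expressed in third-party extension code using only the public headers: read the cached hash, and fall back to PyObject_Hash() only when it is -1. A hedged sketch; my_hash_fast is an invented name, and unlike the cached read, the fallback can fail for unhashable objects:

/* Hypothetical helper mirroring the fast path above, public API only. */
static Py_hash_t
my_hash_fast(PyObject *op)
{
    if (PyUnicode_CheckExact(op)) {
        Py_hash_t cached = PyUnstable_Unicode_GET_CACHED_HASH(op);
        if (cached != -1) {
            /* Cached: no recomputation, and no error is possible here. */
            return cached;
        }
    }
    /* Slow path: computes (and, for str, caches) the hash; may return -1
       with an exception set for unhashable objects. */
    return PyObject_Hash(op);
}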
static inline Py_hash_t
unicode_get_hash(PyObject *o)
{
-    assert(PyUnicode_CheckExact(o));
-    return FT_ATOMIC_LOAD_SSIZE_RELAXED(_PyASCIIObject_CAST(o)->hash);
+    return PyUnstable_Unicode_GET_CACHED_HASH(o);
}
update_cache(struct type_cache_entry *entry, PyObject *name, unsigned int version_tag, PyObject *value)
{
    _Py_atomic_store_ptr_relaxed(&entry->value, value); /* borrowed */
-    assert(_PyASCIIObject_CAST(name)->hash != -1);
+    assert(PyUnstable_Unicode_GET_CACHED_HASH(name) != -1);
    OBJECT_STAT_INC_COND(type_cache_collisions, entry->name != Py_None && entry->name != name);
    // We're releasing this under the lock for simplicity's sake because it's always
    // an exact unicode object or Py_None, so it's safe to do so.
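The assert in update_cache() relies on the name already having a cached hash by the time it reaches the type cache, presumably because it has already been used as a dict key during attribute lookup. Code that cannot rely on such an invariant would hash the key up front before storing it. An illustrative sketch only; the struct and function names (my_entry, my_entry_set) are invented:

/* Hypothetical example, not from CPython: a tiny string-keyed cache entry
   that keeps the same invariant as update_cache() above, namely that the
   key's hash is cached before the entry is filled in. */
struct my_entry {
    PyObject *name;   /* exact unicode key (strong reference), or NULL */
    Py_hash_t hash;   /* its cached hash; never -1 once the entry is set */
};

static void
my_entry_set(struct my_entry *entry, PyObject *name)
{
    assert(PyUnicode_CheckExact(name));

    /* Hash the key up front so the cheap cached read below cannot miss.
       Hashing a str cannot fail, so the return value can be ignored. */
    if (PyUnstable_Unicode_GET_CACHED_HASH(name) == -1) {
        (void)PyObject_Hash(name);
    }
    assert(PyUnstable_Unicode_GET_CACHED_HASH(name) != -1);

    Py_XDECREF(entry->name);
    entry->name = Py_NewRef(name);
    entry->hash = PyUnstable_Unicode_GET_CACHED_HASH(name);
}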