Active upon key insertion. Dummy slots cannot be made Unused again
else the probe sequence in case of collision would have no way to know
they were once active.
+ In free-threaded builds dummy slots are not re-used to allow lock-free
+ lookups to proceed safely.
4. Pending. index >= 0, key != NULL, and value == NULL (split only)
Not yet inserted in split-table.
_Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op);
}
#define ASSERT_DICT_LOCKED(op) ASSERT_DICT_LOCKED(_Py_CAST(PyObject*, op))
+// Free-threaded build: shared-dict state tracking and atomic index access.
+#define IS_DICT_SHARED(mp) _PyObject_GC_IS_SHARED(mp)
+#define SET_DICT_SHARED(mp) _PyObject_GC_SET_SHARED(mp)
+// Relaxed atomic load/store of a hash-table index slot. No trailing
+// semicolon: these must expand to plain expressions so call sites read
+// `ix = LOAD_INDEX(...);` without producing an empty extra statement and
+// so they remain usable inside larger expressions.
+#define LOAD_INDEX(keys, size, idx) _Py_atomic_load_int##size##_relaxed(&((const int##size##_t*)keys->dk_indices)[idx])
+#define STORE_INDEX(keys, size, idx, value) _Py_atomic_store_int##size##_relaxed(&((int##size##_t*)keys->dk_indices)[idx], (int##size##_t)value)
+#define ASSERT_OWNED_OR_SHARED(mp) \
+    assert(_Py_IsOwnedByCurrentThread((PyObject *)mp) || IS_DICT_SHARED(mp))
+
+// Install a new keys object on `mp` with release ordering so concurrent
+// lock-free readers never observe a partially initialized table. The dict
+// must be owned by the current thread or already marked shared.
+static inline void
+set_keys(PyDictObject *mp, PyDictKeysObject *keys)
+{
+    ASSERT_OWNED_OR_SHARED(mp);
+    _Py_atomic_store_ptr_release(&mp->ma_keys, keys);
+}
+
+// Install a new values array on `mp` with release ordering so concurrent
+// lock-free readers never observe a partially initialized array. The dict
+// must be owned by the current thread or already marked shared.
+static inline void
+set_values(PyDictObject *mp, PyDictValues *values)
+{
+    ASSERT_OWNED_OR_SHARED(mp);
+    _Py_atomic_store_ptr_release(&mp->ma_values, values);
+}
+
+// Placeholder until QSBR (quiescent-state-based reclamation) is available;
+// until then "deferred" frees happen immediately.
+#define _PyMem_FreeQsbr PyMem_Free
#define LOCK_KEYS(keys) PyMutex_LockFlags(&keys->dk_mutex, _Py_LOCK_DONT_DETACH)
#define UNLOCK_KEYS(keys) PyMutex_Unlock(&keys->dk_mutex)
#define INCREF_KEYS(dk) dk->dk_refcnt++
#define DECREF_KEYS(dk) dk->dk_refcnt--
#define LOAD_KEYS_NENTIRES(keys) keys->dk_nentries
+// Default (with-GIL) build: dicts are never concurrently shared, so the
+// shared flag is constant-false and index slots use plain accesses.
+#define IS_DICT_SHARED(mp) (false)
+#define SET_DICT_SHARED(mp)
+#define LOAD_INDEX(keys, size, idx) ((const int##size##_t*)(keys->dk_indices))[idx]
+#define STORE_INDEX(keys, size, idx, value) ((int##size##_t*)(keys->dk_indices))[idx] = (int##size##_t)value
/* Record that one more entry has been appended to a split-keys table. */
static inline void
split_keys_entry_added(PyDictKeysObject *keys)
{
    keys->dk_nentries += 1;
}
+// With-GIL build: a plain store suffices, there are no lock-free readers
+// to synchronize with.
+static inline void
+set_keys(PyDictObject *mp, PyDictKeysObject *keys)
+{
+    mp->ma_keys = keys;
+}
+
+// With-GIL build: a plain store suffices, there are no lock-free readers
+// to synchronize with.
+static inline void
+set_values(PyDictObject *mp, PyDictValues *values)
+{
+    mp->ma_values = values;
+}
+
#endif
#define PERTURB_SHIFT 5
static PyObject* dict_iter(PyObject *dict);
-static int
-contains_lock_held(PyDictObject *mp, PyObject *key);
-static int
-contains_known_hash_lock_held(PyDictObject *mp, PyObject *key, Py_ssize_t hash);
static int
setitem_lock_held(PyDictObject *mp, PyObject *key, PyObject *value);
static int
#define DK_MASK(dk) (DK_SIZE(dk)-1)
-static void free_keys_object(PyDictKeysObject *keys);
+static void free_keys_object(PyDictKeysObject *keys, bool use_qsbr);
/* PyDictKeysObject has refcounts like PyObject does, so we have the
following two functions to mirror what Py_INCREF() and Py_DECREF() do.
}
static inline void
-dictkeys_decref(PyInterpreterState *interp, PyDictKeysObject *dk)
+dictkeys_decref(PyInterpreterState *interp, PyDictKeysObject *dk, bool use_qsbr)
{
if (dk->dk_refcnt == _Py_IMMORTAL_REFCNT) {
return;
Py_XDECREF(entries[i].me_value);
}
}
- free_keys_object(dk);
+ free_keys_object(dk, use_qsbr);
}
}
Py_ssize_t ix;
if (log2size < 8) {
- const int8_t *indices = (const int8_t*)(keys->dk_indices);
- ix = indices[i];
+ ix = LOAD_INDEX(keys, 8, i);
}
else if (log2size < 16) {
- const int16_t *indices = (const int16_t*)(keys->dk_indices);
- ix = indices[i];
+ ix = LOAD_INDEX(keys, 16, i);
}
#if SIZEOF_VOID_P > 4
else if (log2size >= 32) {
- const int64_t *indices = (const int64_t*)(keys->dk_indices);
- ix = indices[i];
+ ix = LOAD_INDEX(keys, 64, i);
}
#endif
else {
- const int32_t *indices = (const int32_t*)(keys->dk_indices);
- ix = indices[i];
+ ix = LOAD_INDEX(keys, 32, i);
}
assert(ix >= DKIX_DUMMY);
return ix;
assert(keys->dk_version == 0);
if (log2size < 8) {
- int8_t *indices = (int8_t*)(keys->dk_indices);
assert(ix <= 0x7f);
- indices[i] = (char)ix;
+ STORE_INDEX(keys, 8, i, ix);
}
else if (log2size < 16) {
- int16_t *indices = (int16_t*)(keys->dk_indices);
assert(ix <= 0x7fff);
- indices[i] = (int16_t)ix;
+ STORE_INDEX(keys, 16, i, ix);
}
#if SIZEOF_VOID_P > 4
else if (log2size >= 32) {
- int64_t *indices = (int64_t*)(keys->dk_indices);
- indices[i] = ix;
+ STORE_INDEX(keys, 64, i, ix);
}
#endif
else {
- int32_t *indices = (int32_t*)(keys->dk_indices);
assert(ix <= 0x7fffffff);
- indices[i] = (int32_t)ix;
+ STORE_INDEX(keys, 32, i, ix);
}
}
}
static void
-free_keys_object(PyDictKeysObject *keys)
+free_keys_object(PyDictKeysObject *keys, bool use_qsbr)
{
+#ifdef Py_GIL_DISABLED
+ if (use_qsbr) {
+ _PyMem_FreeQsbr(keys);
+ return;
+ }
+#endif
#ifdef WITH_FREELISTS
struct _Py_dictkeys_freelist *freelist = get_dictkeys_freelist();
if (DK_LOG_SIZE(keys) == PyDict_LOG_MINSIZE
}
static inline void
-free_values(PyDictValues *values)
+free_values(PyDictValues *values, bool use_qsbr)
{
- int prefix_size = ((uint8_t *)values)[-1];
+ int prefix_size = DICT_VALUES_SIZE(values);
+#ifdef Py_GIL_DISABLED
+ if (use_qsbr) {
+ _PyMem_FreeQsbr(((char *)values)-prefix_size);
+ return;
+ }
+#endif
PyMem_Free(((char *)values)-prefix_size);
}
{
mp = PyObject_GC_New(PyDictObject, &PyDict_Type);
if (mp == NULL) {
- dictkeys_decref(interp, keys);
+ dictkeys_decref(interp, keys, false);
if (free_values_on_failure) {
- free_values(values);
+ free_values(values, false);
}
return NULL;
}
size_t size = shared_keys_usable_size(keys);
PyDictValues *values = new_values(size);
if (values == NULL) {
- dictkeys_decref(interp, keys);
+ dictkeys_decref(interp, keys, false);
return PyErr_NoMemory();
}
((char *)values)[-2] = 0;
return ix;
}
+// Called before a lock-free read of `mp` from a thread that may not own it.
+static inline void
+ensure_shared_on_read(PyDictObject *mp)
+{
+#ifdef Py_GIL_DISABLED
+    if (!_Py_IsOwnedByCurrentThread((PyObject *)mp) && !IS_DICT_SHARED(mp)) {
+        // The first time we access a dict from a non-owning thread we mark it
+        // as shared. This ensures that a concurrent resize operation will
+        // delay freeing the old keys or values using QSBR, which is necessary
+        // to safely allow concurrent reads without locking. The critical
+        // section serializes the flag update against the owning thread.
+        Py_BEGIN_CRITICAL_SECTION(mp);
+        if (!IS_DICT_SHARED(mp)) {
+            SET_DICT_SHARED(mp);
+        }
+        Py_END_CRITICAL_SECTION();
+    }
+#endif
+}
+
+// Called, with the dict's lock held, before ma_keys/ma_values are replaced.
+static inline void
+ensure_shared_on_resize(PyDictObject *mp)
+{
+#ifdef Py_GIL_DISABLED
+    _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(mp);
+
+    if (!_Py_IsOwnedByCurrentThread((PyObject *)mp) && !IS_DICT_SHARED(mp)) {
+        // We are writing to the dict from a thread that does NOT own it and
+        // the dict has not yet been marked as shared. Marking it shared
+        // ensures that when ma_keys or ma_values are re-sized the old ones
+        // are freed using QSBR, so lock-free readers stay safe. The caller
+        // already holds the dict's lock, which keeps this from racing with
+        // writes from the owning thread.
+        // Technically this is a little heavy handed, we could just
+        // free the individual old keys / old-values using qsbr
+        SET_DICT_SHARED(mp);
+    }
+#endif
+}
+
+#ifdef Py_GIL_DISABLED
+
+// Lock-free comparison of a non-unicode `key` against entry `ix` of a
+// unicode-keys table. Returns 1 on match, 0 on mismatch, DKIX_ERROR if the
+// rich comparison raised, or DKIX_KEY_CHANGED if the table was concurrently
+// mutated and the caller must restart the probe.
+static inline Py_ALWAYS_INLINE
+Py_ssize_t compare_unicode_generic_threadsafe(PyDictObject *mp, PyDictKeysObject *dk,
+                                              void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
+{
+    PyDictUnicodeEntry *ep = &((PyDictUnicodeEntry *)ep0)[ix];
+    PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key);
+    // Assert on the snapshot, not on a racy re-read of ep->me_key: another
+    // thread may have already replaced the entry since the load above.
+    assert(startkey == NULL || PyUnicode_CheckExact(startkey));
+    assert(!PyUnicode_CheckExact(key));
+
+    if (startkey != NULL) {
+        if (!_Py_TryIncref(&ep->me_key, startkey)) {
+            return DKIX_KEY_CHANGED;
+        }
+
+        if (unicode_get_hash(startkey) == hash) {
+            // The rich comparison may run arbitrary code and mutate the dict.
+            int cmp = PyObject_RichCompareBool(startkey, key, Py_EQ);
+            Py_DECREF(startkey);
+            if (cmp < 0) {
+                return DKIX_ERROR;
+            }
+            if (dk == _Py_atomic_load_ptr_relaxed(&mp->ma_keys) &&
+                startkey == _Py_atomic_load_ptr_relaxed(&ep->me_key)) {
+                // Neither the keys object nor this entry changed meanwhile.
+                return cmp;
+            }
+            else {
+                /* The dict was mutated, restart */
+                return DKIX_KEY_CHANGED;
+            }
+        }
+        else {
+            Py_DECREF(startkey);
+        }
+    }
+    return 0;
+}
+
+// Thread-safe search for a non-Unicode key in a Unicode-keys table.
+static Py_ssize_t
+unicodekeys_lookup_generic_threadsafe(PyDictObject *mp, PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
+{
+    return do_lookup(mp, dk, key, hash, compare_unicode_generic_threadsafe);
+}
+
+// Lock-free comparison of a unicode `key` against entry `ix` of a
+// unicode-keys table. Returns 1 on match, 0 on mismatch, or
+// DKIX_KEY_CHANGED if the entry changed under us and the caller must
+// restart. Unicode comparison cannot run arbitrary code, so unlike the
+// generic variant there is no DKIX_ERROR case and no post-compare
+// re-validation of the keys object.
+static inline Py_ALWAYS_INLINE Py_ssize_t
+compare_unicode_unicode_threadsafe(PyDictObject *mp, PyDictKeysObject *dk,
+                                   void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
+{
+    PyDictUnicodeEntry *ep = &((PyDictUnicodeEntry *)ep0)[ix];
+    PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key);
+    assert(startkey == NULL || PyUnicode_CheckExact(startkey));
+    if (startkey == key) {
+        // Identity match (e.g. interned strings): no incref needed.
+        return 1;
+    }
+    if (startkey != NULL) {
+        if (_Py_IsImmortal(startkey)) {
+            // Immortal keys can never be freed, so it is safe to compare
+            // without taking a reference.
+            return unicode_get_hash(startkey) == hash && unicode_eq(startkey, key);
+        }
+        else {
+            if (!_Py_TryIncref(&ep->me_key, startkey)) {
+                return DKIX_KEY_CHANGED;
+            }
+            if (unicode_get_hash(startkey) == hash && unicode_eq(startkey, key)) {
+                Py_DECREF(startkey);
+                return 1;
+            }
+            Py_DECREF(startkey);
+        }
+    }
+    return 0;
+}
+
+// Thread-safe lookup of a unicode key in a unicode-keys table. `mp` is not
+// passed through because the unicode comparator never needs to re-validate
+// against ma_keys.
+static Py_ssize_t _Py_HOT_FUNCTION
+unicodekeys_lookup_unicode_threadsafe(PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
+{
+    return do_lookup(NULL, dk, key, hash, compare_unicode_unicode_threadsafe);
+}
+
+// Lock-free comparison for an entry of a general (non-unicode) keys table.
+// Returns 1 on match, 0 on mismatch, DKIX_ERROR if the rich comparison
+// raised, or DKIX_KEY_CHANGED if the table was concurrently mutated and the
+// caller must restart the probe.
+static inline Py_ALWAYS_INLINE
+Py_ssize_t compare_generic_threadsafe(PyDictObject *mp, PyDictKeysObject *dk,
+                                      void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
+{
+    PyDictKeyEntry *ep = &((PyDictKeyEntry *)ep0)[ix];
+    PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key);
+    if (startkey == key) {
+        // Identity match: no incref or comparison needed.
+        return 1;
+    }
+    Py_ssize_t ep_hash = _Py_atomic_load_ssize_relaxed(&ep->me_hash);
+    if (ep_hash == hash) {
+        // A NULL key alongside a matching stored hash means the entry is
+        // mid-mutation; restart rather than guess.
+        if (startkey == NULL || !_Py_TryIncref(&ep->me_key, startkey)) {
+            return DKIX_KEY_CHANGED;
+        }
+        // The rich comparison may run arbitrary code and mutate the dict.
+        int cmp = PyObject_RichCompareBool(startkey, key, Py_EQ);
+        Py_DECREF(startkey);
+        if (cmp < 0) {
+            return DKIX_ERROR;
+        }
+        if (dk == _Py_atomic_load_ptr_relaxed(&mp->ma_keys) &&
+            startkey == _Py_atomic_load_ptr_relaxed(&ep->me_key)) {
+            // Neither the keys object nor this entry changed meanwhile.
+            return cmp;
+        }
+        else {
+            /* The dict was mutated, restart */
+            return DKIX_KEY_CHANGED;
+        }
+    }
+    return 0;
+}
+
+// Thread-safe lookup in a general keys table (keys of any hashable type).
+static Py_ssize_t
+dictkeys_generic_lookup_threadsafe(PyDictObject *mp, PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
+{
+    return do_lookup(mp, dk, key, hash, compare_generic_threadsafe);
+}
+
+// Lock-free lookup of `key` (with precomputed `hash`) in `mp`.
+//
+// Returns the entry index (or DKIX_EMPTY / DKIX_ERROR) and stores the value
+// in *value_addr. NOTE: unlike _Py_dict_lookup, a non-NULL *value_addr is a
+// NEW strong reference owned by the caller. Any detected concurrent
+// mutation (or a failed _Py_TryXGetRef) falls back to a locked lookup.
+static Py_ssize_t
+_Py_dict_lookup_threadsafe(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr)
+{
+    PyDictKeysObject *dk;
+    DictKeysKind kind;
+    Py_ssize_t ix;
+    PyObject *value;
+
+    // Mark the dict shared so a concurrent resize defers frees via QSBR.
+    ensure_shared_on_read(mp);
+
+    dk = _Py_atomic_load_ptr(&mp->ma_keys);
+    kind = dk->dk_kind;
+
+    if (kind != DICT_KEYS_GENERAL) {
+        if (PyUnicode_CheckExact(key)) {
+            ix = unicodekeys_lookup_unicode_threadsafe(dk, key, hash);
+        }
+        else {
+            ix = unicodekeys_lookup_generic_threadsafe(mp, dk, key, hash);
+        }
+        if (ix == DKIX_KEY_CHANGED) {
+            goto read_failed;
+        }
+
+        if (ix >= 0) {
+            if (kind == DICT_KEYS_SPLIT) {
+                PyDictValues *values = _Py_atomic_load_ptr(&mp->ma_values);
+                if (values == NULL)
+                    goto read_failed;
+
+                // Re-check the bound: the values array may be smaller than
+                // the keys table suggests while a resize is in flight.
+                uint8_t capacity = _Py_atomic_load_uint8_relaxed(&DICT_VALUES_SIZE(values));
+                if (ix >= (Py_ssize_t)capacity)
+                    goto read_failed;
+
+                value = _Py_TryXGetRef(&values->values[ix]);
+                if (value == NULL)
+                    goto read_failed;
+
+                // Validate nothing replaced the values array meanwhile.
+                if (values != _Py_atomic_load_ptr(&mp->ma_values)) {
+                    Py_DECREF(value);
+                    goto read_failed;
+                }
+            }
+            else {
+                value = _Py_TryXGetRef(&DK_UNICODE_ENTRIES(dk)[ix].me_value);
+                if (value == NULL) {
+                    goto read_failed;
+                }
+
+                // Validate nothing replaced the keys object meanwhile.
+                if (dk != _Py_atomic_load_ptr(&mp->ma_keys)) {
+                    Py_DECREF(value);
+                    goto read_failed;
+                }
+            }
+        }
+        else {
+            value = NULL;
+        }
+    }
+    else {
+        ix = dictkeys_generic_lookup_threadsafe(mp, dk, key, hash);
+        if (ix == DKIX_KEY_CHANGED) {
+            goto read_failed;
+        }
+        if (ix >= 0) {
+            value = _Py_TryXGetRef(&DK_ENTRIES(dk)[ix].me_value);
+            if (value == NULL)
+                goto read_failed;
+
+            // Validate nothing replaced the keys object meanwhile.
+            if (dk != _Py_atomic_load_ptr(&mp->ma_keys)) {
+                Py_DECREF(value);
+                goto read_failed;
+            }
+        }
+        else {
+            value = NULL;
+        }
+    }
+
+    *value_addr = value;
+    return ix;
+
+read_failed:
+    // In addition to the normal races of the dict being modified, the
+    // _Py_TryXGetRef calls can fail if the object doesn't yet have a shared
+    // ref count. That can happen here or in the *_lookup_* helper. In that
+    // case we need to take the lock to avoid mutation and do a normal incref
+    // which will make the object shared.
+    Py_BEGIN_CRITICAL_SECTION(mp);
+    ix = _Py_dict_lookup(mp, key, hash, &value);
+    *value_addr = value;
+    if (value != NULL) {
+        assert(ix >= 0);
+        Py_INCREF(value);
+    }
+    Py_END_CRITICAL_SECTION();
+    return ix;
+}
+
+#endif
+
int
_PyDict_HasOnlyStringKeys(PyObject *dict)
{
_PyObject_GC_UNTRACK(op);
}
+// Probe-sequence predicate: true while the probe must skip slot `ix` when
+// looking for a free slot for a key known not to be present.
+static inline int
+is_unusable_slot(Py_ssize_t ix)
+{
+#ifdef Py_GIL_DISABLED
+    // Free-threaded builds never re-use dummy slots (lock-free lookups could
+    // otherwise terminate a probe early), so dummies stay unusable here too.
+    return ix >= 0 || ix == DKIX_DUMMY;
+#else
+    return ix >= 0;
+#endif
+}
+
/* Internal function to find slot for an item from its hash
when it is known that the key is not present in the dict.
*/
const size_t mask = DK_MASK(keys);
size_t i = hash & mask;
Py_ssize_t ix = dictkeys_get_index(keys, i);
- for (size_t perturb = hash; ix >= 0;) {
+ for (size_t perturb = hash; is_unusable_slot(ix);) {
perturb >>= PERTURB_SHIFT;
i = (i*5 + perturb + 1) & mask;
ix = dictkeys_get_index(keys, i);
return -1;
}
-// Same to insertdict but specialized for ma_keys = Py_EMPTY_KEYS.
+// Same as insertdict but specialized for ma_keys == Py_EMPTY_KEYS.
// Consumes key and value references.
static int
insert_to_emptydict(PyInterpreterState *interp, PyDictObject *mp,
return -1;
}
/* We don't decref Py_EMPTY_KEYS here because it is immortal. */
- mp->ma_keys = newkeys;
- mp->ma_values = NULL;
+ assert(mp->ma_values == NULL);
MAINTAIN_TRACKING(mp, key, value);
size_t hashpos = (size_t)hash & (PyDict_MINSIZE-1);
- dictkeys_set_index(mp->ma_keys, hashpos, 0);
+ dictkeys_set_index(newkeys, hashpos, 0);
if (unicode) {
- PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(mp->ma_keys);
+ PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(newkeys);
ep->me_key = key;
ep->me_value = value;
}
else {
- PyDictKeyEntry *ep = DK_ENTRIES(mp->ma_keys);
+ PyDictKeyEntry *ep = DK_ENTRIES(newkeys);
ep->me_key = key;
ep->me_hash = hash;
ep->me_value = value;
}
mp->ma_used++;
mp->ma_version_tag = new_version;
- mp->ma_keys->dk_usable--;
- mp->ma_keys->dk_nentries++;
+ newkeys->dk_usable--;
+ newkeys->dk_nentries++;
+ // We store the keys last so no one can see them in a partially inconsistent
+ // state so that we don't need to switch the keys to being shared yet for
+ // the case where we're inserting from the non-owner thread. We don't use
+ // set_keys here because the transition from empty to non-empty is safe
+ // as the empty keys will never be freed.
+#ifdef Py_GIL_DISABLED
+ _Py_atomic_store_ptr_release(&mp->ma_keys, newkeys);
+#else
+ mp->ma_keys = newkeys;
+#endif
return 0;
}
dictresize(PyInterpreterState *interp, PyDictObject *mp,
uint8_t log2_newsize, int unicode)
{
- PyDictKeysObject *oldkeys;
+ PyDictKeysObject *oldkeys, *newkeys;
PyDictValues *oldvalues;
ASSERT_DICT_LOCKED(mp);
unicode = 0;
}
+ ensure_shared_on_resize(mp);
/* NOTE: Current odict checks mp->ma_keys to detect resize happen.
* So we can't reuse oldkeys even if oldkeys->dk_size == newsize.
* TODO: Try reusing oldkeys when reimplement odict.
*/
/* Allocate a new table. */
- mp->ma_keys = new_keys_object(interp, log2_newsize, unicode);
- if (mp->ma_keys == NULL) {
- mp->ma_keys = oldkeys;
+ newkeys = new_keys_object(interp, log2_newsize, unicode);
+ if (newkeys == NULL) {
return -1;
}
// New table must be large enough.
- assert(mp->ma_keys->dk_usable >= mp->ma_used);
+ assert(newkeys->dk_usable >= mp->ma_used);
Py_ssize_t numentries = mp->ma_used;
/* Convert split table into new combined table.
* We must incref keys; we can transfer values.
*/
- if (mp->ma_keys->dk_kind == DICT_KEYS_GENERAL) {
+ if (newkeys->dk_kind == DICT_KEYS_GENERAL) {
// split -> generic
- PyDictKeyEntry *newentries = DK_ENTRIES(mp->ma_keys);
+ PyDictKeyEntry *newentries = DK_ENTRIES(newkeys);
for (Py_ssize_t i = 0; i < numentries; i++) {
int index = get_index_from_order(mp, i);
newentries[i].me_hash = unicode_get_hash(ep->me_key);
newentries[i].me_value = oldvalues->values[index];
}
- build_indices_generic(mp->ma_keys, newentries, numentries);
+ build_indices_generic(newkeys, newentries, numentries);
}
else { // split -> combined unicode
- PyDictUnicodeEntry *newentries = DK_UNICODE_ENTRIES(mp->ma_keys);
+ PyDictUnicodeEntry *newentries = DK_UNICODE_ENTRIES(newkeys);
for (Py_ssize_t i = 0; i < numentries; i++) {
int index = get_index_from_order(mp, i);
newentries[i].me_key = Py_NewRef(ep->me_key);
newentries[i].me_value = oldvalues->values[index];
}
- build_indices_unicode(mp->ma_keys, newentries, numentries);
+ build_indices_unicode(newkeys, newentries, numentries);
}
UNLOCK_KEYS(oldkeys);
- dictkeys_decref(interp, oldkeys);
- mp->ma_values = NULL;
- free_values(oldvalues);
+ set_keys(mp, newkeys);
+ dictkeys_decref(interp, oldkeys, IS_DICT_SHARED(mp));
+ set_values(mp, NULL);
+ free_values(oldvalues, IS_DICT_SHARED(mp));
}
else { // oldkeys is combined.
if (oldkeys->dk_kind == DICT_KEYS_GENERAL) {
// generic -> generic
- assert(mp->ma_keys->dk_kind == DICT_KEYS_GENERAL);
+ assert(newkeys->dk_kind == DICT_KEYS_GENERAL);
PyDictKeyEntry *oldentries = DK_ENTRIES(oldkeys);
- PyDictKeyEntry *newentries = DK_ENTRIES(mp->ma_keys);
+ PyDictKeyEntry *newentries = DK_ENTRIES(newkeys);
if (oldkeys->dk_nentries == numentries) {
memcpy(newentries, oldentries, numentries * sizeof(PyDictKeyEntry));
}
newentries[i] = *ep++;
}
}
- build_indices_generic(mp->ma_keys, newentries, numentries);
+ build_indices_generic(newkeys, newentries, numentries);
}
else { // oldkeys is combined unicode
PyDictUnicodeEntry *oldentries = DK_UNICODE_ENTRIES(oldkeys);
if (unicode) { // combined unicode -> combined unicode
- PyDictUnicodeEntry *newentries = DK_UNICODE_ENTRIES(mp->ma_keys);
+ PyDictUnicodeEntry *newentries = DK_UNICODE_ENTRIES(newkeys);
if (oldkeys->dk_nentries == numentries && mp->ma_keys->dk_kind == DICT_KEYS_UNICODE) {
memcpy(newentries, oldentries, numentries * sizeof(PyDictUnicodeEntry));
}
newentries[i] = *ep++;
}
}
- build_indices_unicode(mp->ma_keys, newentries, numentries);
+ build_indices_unicode(newkeys, newentries, numentries);
}
else { // combined unicode -> generic
- PyDictKeyEntry *newentries = DK_ENTRIES(mp->ma_keys);
+ PyDictKeyEntry *newentries = DK_ENTRIES(newkeys);
PyDictUnicodeEntry *ep = oldentries;
for (Py_ssize_t i = 0; i < numentries; i++) {
while (ep->me_value == NULL)
newentries[i].me_value = ep->me_value;
ep++;
}
- build_indices_generic(mp->ma_keys, newentries, numentries);
+ build_indices_generic(newkeys, newentries, numentries);
}
}
+ set_keys(mp, newkeys);
+
if (oldkeys != Py_EMPTY_KEYS) {
#ifdef Py_REF_DEBUG
_Py_DecRefTotal(_PyInterpreterState_GET());
#endif
assert(oldkeys->dk_kind != DICT_KEYS_SPLIT);
assert(oldkeys->dk_refcnt == 1);
- free_keys_object(oldkeys);
+ free_keys_object(oldkeys, IS_DICT_SHARED(mp));
}
}
PyObject *value;
Py_ssize_t ix; (void)ix;
-
PyObject *exc = _PyErr_GetRaisedException(tstate);
+#ifdef Py_GIL_DISABLED
+ ix = _Py_dict_lookup_threadsafe(mp, key, hash, &value);
+ Py_XDECREF(value);
+#else
ix = _Py_dict_lookup(mp, key, hash, &value);
+#endif
/* Ignore any exception raised by the lookup */
PyObject *exc2 = _PyErr_Occurred(tstate);
return NULL;
}
+#ifdef Py_GIL_DISABLED
+ ix = _Py_dict_lookup_threadsafe(mp, key, hash, &value);
+ Py_XDECREF(value);
+#else
ix = _Py_dict_lookup(mp, key, hash, &value);
+#endif
assert(ix >= 0 || value == NULL);
return value; // borrowed reference
}
+/* Look up `key` with a precomputed `hash` in dict `op` and, when present,
+ * store a new strong reference to the value in *result.
+ * Returns 1 if the key is present, 0 if it is missing (in which case
+ * *result is set to NULL), and -1 with an exception set on error.
+*/
+int
+_PyDict_GetItemRef_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash, PyObject **result)
+{
+    PyDictObject *mp = (PyDictObject *)op;
+    PyObject *value;
+
+#ifdef Py_GIL_DISABLED
+    Py_ssize_t ix = _Py_dict_lookup_threadsafe(mp, key, hash, &value);
+#else
+    Py_ssize_t ix = _Py_dict_lookup(mp, key, hash, &value);
+#endif
+    assert(ix >= 0 || value == NULL);
+    if (ix == DKIX_ERROR) {
+        *result = NULL;
+        return -1;
+    }
+    if (value == NULL) {
+        *result = NULL;
+        return 0; // missing key
+    }
+#ifdef Py_GIL_DISABLED
+    // The thread-safe lookup already handed back a strong reference.
+    *result = value;
+#else
+    *result = Py_NewRef(value);
+#endif
+    return 1; // key is present
+}
int
PyDict_GetItemRef(PyObject *op, PyObject *key, PyObject **result)
*result = NULL;
return -1;
}
- PyDictObject*mp = (PyDictObject *)op;
Py_hash_t hash;
if (!PyUnicode_CheckExact(key) || (hash = unicode_get_hash(key)) == -1)
}
}
- PyObject *value;
- Py_ssize_t ix = _Py_dict_lookup(mp, key, hash, &value);
- assert(ix >= 0 || value == NULL);
- if (ix == DKIX_ERROR) {
- *result = NULL;
- return -1;
- }
- if (value == NULL) {
- *result = NULL;
- return 0; // missing key
- }
- *result = Py_NewRef(value);
- return 1; // key is present
+ return _PyDict_GetItemRef_KnownHash(op, key, hash, result);
}
}
}
+#ifdef Py_GIL_DISABLED
+ ix = _Py_dict_lookup_threadsafe(mp, key, hash, &value);
+ Py_XDECREF(value);
+#else
ix = _Py_dict_lookup(mp, key, hash, &value);
+#endif
assert(ix >= 0 || value == NULL);
return value; // borrowed reference
}
return rv;
}
-
/* Fast version of global value lookup (LOAD_GLOBAL).
* Lookup in globals, then builtins.
*
PyObject *
_PyDict_LoadGlobal(PyDictObject *globals, PyDictObject *builtins, PyObject *key)
{
+ // TODO: Thread safety
Py_ssize_t ix;
Py_hash_t hash;
PyObject *value;
PyInterpreterState *interp = _PyInterpreterState_GET();
uint64_t new_version = _PyDict_NotifyEvent(
interp, PyDict_EVENT_CLEARED, mp, NULL, NULL);
- dictkeys_incref(Py_EMPTY_KEYS);
- mp->ma_keys = Py_EMPTY_KEYS;
- mp->ma_values = NULL;
+ // We don't inc ref empty keys because they're immortal
+ ensure_shared_on_resize(mp);
+
+ set_keys(mp, Py_EMPTY_KEYS);
+ set_values(mp, NULL);
mp->ma_used = 0;
mp->ma_version_tag = new_version;
/* ...then clear the keys and values */
n = oldkeys->dk_nentries;
for (i = 0; i < n; i++)
Py_CLEAR(oldvalues->values[i]);
- free_values(oldvalues);
- dictkeys_decref(interp, oldkeys);
+ free_values(oldvalues, IS_DICT_SHARED(mp));
+ dictkeys_decref(interp, oldkeys, false);
}
else {
assert(oldkeys->dk_refcnt == 1);
- dictkeys_decref(interp, oldkeys);
+ dictkeys_decref(interp, oldkeys, IS_DICT_SHARED(mp));
}
ASSERT_CONSISTENT(mp);
}
for (i = 0, n = mp->ma_keys->dk_nentries; i < n; i++) {
Py_XDECREF(values->values[i]);
}
- free_values(values);
- dictkeys_decref(interp, keys);
+ free_values(values, false);
+ dictkeys_decref(interp, keys, false);
}
else if (keys != NULL) {
assert(keys->dk_refcnt == 1 || keys == Py_EMPTY_KEYS);
- dictkeys_decref(interp, keys);
+ dictkeys_decref(interp, keys, false);
}
#ifdef WITH_FREELISTS
struct _Py_dict_freelist *freelist = get_dict_freelist();
}
static PyObject *
-dict_subscript_lock_held(PyObject *self, PyObject *key)
+dict_subscript(PyObject *self, PyObject *key)
{
PyDictObject *mp = (PyDictObject *)self;
Py_ssize_t ix;
if (hash == -1)
return NULL;
}
+#ifdef Py_GIL_DISABLED
+ ix = _Py_dict_lookup_threadsafe(mp, key, hash, &value);
+#else
ix = _Py_dict_lookup(mp, key, hash, &value);
+#endif
if (ix == DKIX_ERROR)
return NULL;
if (ix == DKIX_EMPTY || value == NULL) {
_PyErr_SetKeyError(key);
return NULL;
}
+#ifdef Py_GIL_DISABLED
+ return value;
+#else
return Py_NewRef(value);
-}
-
-static PyObject *
-dict_subscript(PyObject *self, PyObject *key)
-{
- PyObject *res;
- Py_BEGIN_CRITICAL_SECTION(self);
- res = dict_subscript_lock_held(self, key);
- Py_END_CRITICAL_SECTION();
- return res;
+#endif
}
static int
if (keys == NULL)
return -1;
- dictkeys_decref(interp, mp->ma_keys);
+ ensure_shared_on_resize(mp);
+ dictkeys_decref(interp, mp->ma_keys, IS_DICT_SHARED(mp));
mp->ma_keys = keys;
if (_PyDict_HasSplitTable(mp)) {
- free_values(mp->ma_values);
+ free_values(mp->ma_values, IS_DICT_SHARED(mp));
mp->ma_values = NULL;
}
Py_NewRef(key), hash, Py_NewRef(value));
}
else {
- err = contains_known_hash_lock_held(mp, key, hash);
+ err = _PyDict_Contains_KnownHash((PyObject *)mp, key, hash);
if (err == 0) {
err = insertdict(interp, mp,
Py_NewRef(key), hash, Py_NewRef(value));
for (key = PyIter_Next(iter); key; key = PyIter_Next(iter)) {
if (override != 1) {
- status = contains_lock_held(mp, key);
+ status = PyDict_Contains(a, key);
if (status != 0) {
if (status > 0) {
if (override == 0) {
return PyErr_NoMemory();
split_copy = PyObject_GC_New(PyDictObject, &PyDict_Type);
if (split_copy == NULL) {
- free_values(newvalues);
+ free_values(newvalues, false);
return NULL;
}
size_t prefix_size = ((uint8_t *)newvalues)[-1];
/*[clinic input]
@coexist
-@critical_section
dict.__contains__
key: object
[clinic start generated code]*/
static PyObject *
-dict___contains___impl(PyDictObject *self, PyObject *key)
-/*[clinic end generated code: output=1b314e6da7687dae input=bc76ec9c157cb81b]*/
+dict___contains__(PyDictObject *self, PyObject *key)
+/*[clinic end generated code: output=a3d03db709ed6e6b input=fe1cb42ad831e820]*/
{
- register PyDictObject *mp = self;
- Py_hash_t hash;
- Py_ssize_t ix;
- PyObject *value;
-
- if (!PyUnicode_CheckExact(key) || (hash = unicode_get_hash(key)) == -1) {
- hash = PyObject_Hash(key);
- if (hash == -1)
- return NULL;
- }
- ix = _Py_dict_lookup(mp, key, hash, &value);
- if (ix == DKIX_ERROR)
+ int contains = PyDict_Contains((PyObject *)self, key);
+ if (contains < 0) {
return NULL;
- if (ix == DKIX_EMPTY || value == NULL)
- Py_RETURN_FALSE;
- Py_RETURN_TRUE;
+ }
+ if (contains) {
+ Py_RETURN_TRUE;
+ }
+ Py_RETURN_FALSE;
}
/*[clinic input]
if (hash == -1)
return NULL;
}
+#ifdef Py_GIL_DISABLED
+ ix = _Py_dict_lookup_threadsafe(self, key, hash, &val);
+#else
ix = _Py_dict_lookup(self, key, hash, &val);
+#endif
if (ix == DKIX_ERROR)
return NULL;
+#ifdef Py_GIL_DISABLED
+ if (ix == DKIX_EMPTY || val == NULL) {
+ val = Py_NewRef(default_value);
+ }
+ return val;
+#else
if (ix == DKIX_EMPTY || val == NULL) {
val = default_value;
}
return Py_NewRef(val);
+#endif
}
static int
{NULL, NULL} /* sentinel */
};
-static int
-contains_known_hash_lock_held(PyDictObject *mp, PyObject *key, Py_ssize_t hash)
-{
- Py_ssize_t ix;
- PyObject *value;
-
- ASSERT_DICT_LOCKED(mp);
-
- ix = _Py_dict_lookup(mp, key, hash, &value);
- if (ix == DKIX_ERROR)
- return -1;
- return (ix != DKIX_EMPTY && value != NULL);
-}
-
-static int
-contains_lock_held(PyDictObject *mp, PyObject *key)
+/* Return 1 if `key` is in dict `op`, 0 if not, and -1 on error. */
+int
+PyDict_Contains(PyObject *op, PyObject *key)
{
Py_hash_t hash;
- Py_ssize_t ix;
- PyObject *value;
-
- ASSERT_DICT_LOCKED(mp);
if (!PyUnicode_CheckExact(key) || (hash = unicode_get_hash(key)) == -1) {
hash = PyObject_Hash(key);
if (hash == -1)
return -1;
}
- ix = _Py_dict_lookup(mp, key, hash, &value);
- if (ix == DKIX_ERROR)
- return -1;
- return (ix != DKIX_EMPTY && value != NULL);
-}
-/* Return 1 if `key` is in dict `op`, 0 if not, and -1 on error. */
-int
-PyDict_Contains(PyObject *op, PyObject *key)
-{
- int res;
- Py_BEGIN_CRITICAL_SECTION(op);
- res = contains_lock_held((PyDictObject *)op, key);
- Py_END_CRITICAL_SECTION();
- return res;
+ return _PyDict_Contains_KnownHash(op, key, hash);
}
int
PyObject *value;
Py_ssize_t ix;
+#ifdef Py_GIL_DISABLED
+ ix = _Py_dict_lookup_threadsafe(mp, key, hash, &value);
+#else
ix = _Py_dict_lookup(mp, key, hash, &value);
+#endif
if (ix == DKIX_ERROR)
return -1;
- return (ix != DKIX_EMPTY && value != NULL);
+ if (ix != DKIX_EMPTY && value != NULL) {
+#ifdef Py_GIL_DISABLED
+ Py_DECREF(value);
+#endif
+ return 1;
+ }
+ return 0;
}
int
/* dictreviter */
static PyObject *
-dictreviter_iter_PyDict_Next(PyDictObject *d, PyObject *self)
+dictreviter_iter_lock_held(PyDictObject *d, PyObject *self)
{
dictiterobject *di = (dictiterobject *)self;
PyObject *value;
Py_BEGIN_CRITICAL_SECTION(d);
- value = dictreviter_iter_PyDict_Next(d, self);
+ value = dictreviter_iter_lock_held(d, self);
Py_END_CRITICAL_SECTION();
return value;
{
return false;
}
+ ensure_shared_on_resize(dict);
+
assert(dict->ma_values);
// We have an opportunity to do something *really* cool: dematerialize it!
_PyDictKeys_DecRef(dict->ma_keys);
for (Py_ssize_t i = 0; i < keys->dk_nentries; i++) {
Py_XDECREF(values->values[i]);
}
- free_values(values);
+ free_values(values, IS_DICT_SHARED((PyDictObject*)self));
}
int
Py_CLEAR(values->values[i]);
}
dorv_ptr->dict = NULL;
- free_values(values);
+ free_values(values, IS_DICT_SHARED((PyDictObject*)obj));
}
else {
PyObject *dict = dorv_ptr->dict;
_PyDictKeys_DecRef(PyDictKeysObject *keys)
{
PyInterpreterState *interp = _PyInterpreterState_GET();
- dictkeys_decref(interp, keys);
+ dictkeys_decref(interp, keys, false);
}
uint32_t _PyDictKeys_GetVersionForCurrentState(PyInterpreterState *interp,