_Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op);
}
/* Lock-assertion helpers for the free-threaded (Py_GIL_DISABLED) build.
 * NOTE(review): this chunk is an unresolved diff fragment — lines prefixed
 * '+' are patch additions, unprefixed lines are context.  The macro below
 * deliberately shadows an inline function of the same name defined just
 * above; function-like macros do not expand recursively, so the inner
 * ASSERT_DICT_LOCKED resolves to the function. */
#define ASSERT_DICT_LOCKED(op) ASSERT_DICT_LOCKED(_Py_CAST(PyObject*, op))
/* Assert the dict is locked UNLESS the world is stopped (stop-the-world
 * pause, e.g. during GC) — when all other threads are paused, no per-object
 * lock is required.  NOTE(review): bare `if` without do/while(0) — presumably
 * acceptable upstream because it is assert-only, but it is dangling-else
 * prone; confirm against CPython master. */
+#define ASSERT_WORLD_STOPPED_OR_DICT_LOCKED(op) \
+ if (!_PyInterpreterState_GET()->stoptheworld.world_stopped) { \
+ ASSERT_DICT_LOCKED(op); \
+ }
+
/* Shared-dict tracking: a dict becomes "shared" once it is visible to more
 * than one thread; wraps the GC bit helpers. */
#define IS_DICT_SHARED(mp) _PyObject_GC_IS_SHARED(mp)
#define SET_DICT_SHARED(mp) _PyObject_GC_SET_SHARED(mp)
/* Relaxed atomic load of one entry of the dk_indices table, at the index
 * width selected by `size` (8/16/32/64 bits via token pasting). */
#define LOAD_INDEX(keys, size, idx) _Py_atomic_load_int##size##_relaxed(&((const int##size##_t*)keys->dk_indices)[idx]);
#else /* Py_GIL_DISABLED */
/* Default (GIL) build: all locking assertions and key-lock operations are
 * no-ops — the GIL already serializes access. */
#define ASSERT_DICT_LOCKED(op)
+#define ASSERT_WORLD_STOPPED_OR_DICT_LOCKED(op)
#define LOCK_KEYS(keys)
#define UNLOCK_KEYS(keys)
#define ASSERT_KEYS_LOCKED(keys)
if (FT_ATOMIC_LOAD_SSIZE_RELAXED(dk->dk_refcnt) == _Py_IMMORTAL_REFCNT) {
return;
}
- assert(dk->dk_refcnt > 0);
+ assert(FT_ATOMIC_LOAD_SSIZE(dk->dk_refcnt) > 0);
#ifdef Py_REF_DEBUG
_Py_DecRefTotal(_PyThreadState_GET());
#endif
int
_PyDict_CheckConsistency(PyObject *op, int check_content)
{
+ ASSERT_WORLD_STOPPED_OR_DICT_LOCKED(op);
+
#define CHECK(expr) \
do { if (!(expr)) { _PyObject_ASSERT_FAILED_MSG(op, Py_STRINGIFY(expr)); } } while (0)
PyObject *value;
Py_ssize_t i, numentries;
+ ASSERT_WORLD_STOPPED_OR_DICT_LOCKED(op);
+
if (!PyDict_CheckExact(op) || !_PyObject_GC_IS_TRACKED(op))
return;
insert_split_value(PyInterpreterState *interp, PyDictObject *mp, PyObject *key, PyObject *value, Py_ssize_t ix)
{
assert(PyUnicode_CheckExact(key));
+ ASSERT_DICT_LOCKED(mp);
MAINTAIN_TRACKING(mp, key, value);
PyObject *old_value = mp->ma_values->values[ix];
if (old_value == NULL) {
uint64_t new_version = _PyDict_NotifyEvent(interp, PyDict_EVENT_ADDED, mp, key, value);
STORE_SPLIT_VALUE(mp, ix, Py_NewRef(value));
_PyDictValues_AddToInsertionOrder(mp->ma_values, ix);
- mp->ma_used++;
+ STORE_USED(mp, mp->ma_used + 1);
mp->ma_version_tag = new_version;
}
else {
goto Fail;
}
mp->ma_version_tag = new_version;
- mp->ma_used++;
+ STORE_USED(mp, mp->ma_used + 1);
ASSERT_CONSISTENT(mp);
return 0;
}
ep->me_hash = hash;
STORE_VALUE(ep, value);
}
- FT_ATOMIC_STORE_SSIZE_RELAXED(mp->ma_used, FT_ATOMIC_LOAD_SSIZE_RELAXED(mp->ma_used) + 1);
+ STORE_USED(mp, mp->ma_used + 1);
mp->ma_version_tag = new_version;
newkeys->dk_usable--;
newkeys->dk_nentries++;
// the case where we're inserting from the non-owner thread. We don't use
// set_keys here because the transition from empty to non-empty is safe
// as the empty keys will never be freed.
-#ifdef Py_GIL_DISABLED
- _Py_atomic_store_ptr_release(&mp->ma_keys, newkeys);
-#else
- mp->ma_keys = newkeys;
-#endif
+ FT_ATOMIC_STORE_PTR_RELEASE(mp->ma_keys, newkeys);
return 0;
}
Py_ssize_t hashpos = lookdict_index(mp->ma_keys, hash, ix);
assert(hashpos >= 0);
- FT_ATOMIC_STORE_SSIZE_RELAXED(mp->ma_used, FT_ATOMIC_LOAD_SSIZE(mp->ma_used) - 1);
+ STORE_USED(mp, mp->ma_used - 1);
mp->ma_version_tag = new_version;
if (_PyDict_HasSplitTable(mp)) {
assert(old_value == mp->ma_values->values[ix]);
// We don't inc ref empty keys because they're immortal
ensure_shared_on_resize(mp);
mp->ma_version_tag = new_version;
- mp->ma_used = 0;
+ STORE_USED(mp, 0);
if (oldvalues == NULL) {
set_keys(mp, Py_EMPTY_KEYS);
assert(oldkeys->dk_refcnt == 1);
_PyUnicodeWriter writer;
int first;
+ ASSERT_DICT_LOCKED(mp);
+
i = Py_ReprEnter((PyObject *)mp);
if (i != 0) {
return i > 0 ? PyUnicode_FromString("{...}") : NULL;
/* __len__ slot: return the number of items in the dict.
 * NOTE(review): unresolved diff fragment — the '+' line supersedes the two
 * '-' lines.  The change switches from an unconditional atomic load to the
 * FT_ATOMIC_LOAD_SSIZE_RELAXED macro, which presumably compiles to a relaxed
 * atomic load only under Py_GIL_DISABLED and a plain read otherwise — confirm
 * against pycore_pyatomic_ft_wrappers.h. */
static Py_ssize_t
dict_length(PyObject *self)
{
- PyDictObject *mp = (PyDictObject *)self;
- return _Py_atomic_load_ssize_relaxed(&mp->ma_used);
+ return FT_ATOMIC_LOAD_SSIZE_RELAXED(((PyDictObject *)self)->ma_used);
}
static PyObject *
static int
dict_dict_merge(PyInterpreterState *interp, PyDictObject *mp, PyDictObject *other, int override)
{
+ ASSERT_DICT_LOCKED(mp);
+ ASSERT_DICT_LOCKED(other);
+
if (other == mp || other->ma_used == 0)
/* a.update(a) or a.update({}); nothing to do */
return 0;
ensure_shared_on_resize(mp);
dictkeys_decref(interp, mp->ma_keys, IS_DICT_SHARED(mp));
mp->ma_keys = keys;
- mp->ma_used = other->ma_used;
+ STORE_USED(mp, other->ma_used);
mp->ma_version_tag = new_version;
ASSERT_CONSISTENT(mp);
PyErr_BadInternalCall();
return -1;
}
- return ((PyDictObject *)mp)->ma_used;
+ return FT_ATOMIC_LOAD_SSIZE_RELAXED(((PyDictObject *)mp)->ma_used);
}
/* Return 1 if dicts equal, 0 if not, -1 if error.
}
MAINTAIN_TRACKING(mp, key, value);
- mp->ma_used++;
+ STORE_USED(mp, mp->ma_used + 1);
mp->ma_version_tag = new_version;
assert(mp->ma_keys->dk_usable >= 0);
ASSERT_CONSISTENT(mp);
uint64_t new_version;
PyInterpreterState *interp = _PyInterpreterState_GET();
+ ASSERT_DICT_LOCKED(self);
+
/* Allocate the result tuple before checking the size. Believe it
* or not, this allocation could trigger a garbage collection which
* could empty the dict, so if we checked the size first and that
static PyObject *
dictiter_new(PyDictObject *dict, PyTypeObject *itertype)
{
+ Py_ssize_t used;
dictiterobject *di;
di = PyObject_GC_New(dictiterobject, itertype);
if (di == NULL) {
return NULL;
}
di->di_dict = (PyDictObject*)Py_NewRef(dict);
- di->di_used = dict->ma_used;
- di->len = dict->ma_used;
+ used = FT_ATOMIC_LOAD_SSIZE_RELAXED(dict->ma_used);
+ di->di_used = used;
+ di->len = used;
if (itertype == &PyDictRevIterKey_Type ||
itertype == &PyDictRevIterItem_Type ||
itertype == &PyDictRevIterValue_Type) {
if (_PyDict_HasSplitTable(dict)) {
- di->di_pos = dict->ma_used - 1;
+ di->di_pos = used - 1;
}
else {
di->di_pos = load_keys_nentries(dict) - 1;
{
dictiterobject *di = (dictiterobject *)self;
Py_ssize_t len = 0;
- if (di->di_dict != NULL && di->di_used == di->di_dict->ma_used)
- len = di->len;
+ if (di->di_dict != NULL && di->di_used == FT_ATOMIC_LOAD_SSIZE_RELAXED(di->di_dict->ma_used))
+ len = FT_ATOMIC_LOAD_SSIZE_RELAXED(di->len);
return PyLong_FromSize_t(len);
}
Py_ssize_t i;
assert (PyDict_Check(d));
+ ASSERT_DICT_LOCKED(d);
if (di->di_used != d->ma_used) {
PyErr_SetString(PyExc_RuntimeError,
_PyDictViewObject *dv = (_PyDictViewObject *)self;
Py_ssize_t len = 0;
if (dv->dv_dict != NULL)
- len = dv->dv_dict->ma_used;
+ len = FT_ATOMIC_LOAD_SSIZE_RELAXED(dv->dv_dict->ma_used);
return len;
}
_PyDictValues_AddToInsertionOrder(values, ix);
if (dict) {
assert(dict->ma_values == values);
- dict->ma_used++;
+ STORE_USED(dict, dict->ma_used + 1);
}
}
else {
delete_index_from_values(values, ix);
if (dict) {
assert(dict->ma_values == values);
- dict->ma_used--;
+ STORE_USED(dict, dict->ma_used - 1);
}
}
Py_DECREF(old_value);
if (dict == NULL) {
return 1;
}
- return ((PyDictObject *)dict)->ma_used == 0;
+ return FT_ATOMIC_LOAD_SSIZE_RELAXED(((PyDictObject *)dict)->ma_used) == 0;
}
int