};
#define UCOUNTS_HASHTABLE_BITS 10
-static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
+#define UCOUNTS_HASHTABLE_ENTRIES (1 << UCOUNTS_HASHTABLE_BITS)
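+/*
+ * Each bucket is a nulls-terminated list so RCU readers can detect the
+ * end of a chain while walking it without taking ucounts_lock.
+ */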
+static struct hlist_nulls_head ucounts_hashtable[UCOUNTS_HASHTABLE_ENTRIES] = {
+ [0 ... UCOUNTS_HASHTABLE_ENTRIES - 1] = HLIST_NULLS_HEAD_INIT(0)
+};
static DEFINE_SPINLOCK(ucounts_lock);
#define ucounts_hashfn(ns, uid)						\
	hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
		  UCOUNTS_HASHTABLE_BITS)
#define ucounts_hashentry(ns, uid)	\
	(ucounts_hashtable + ucounts_hashfn(ns, uid))
-
#ifdef CONFIG_SYSCTL
static struct ctl_table_set *
set_lookup(struct ctl_table_root *root)
{
	return &current_user_ns()->set;
}
#endif
-static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
+static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid,
+ struct hlist_nulls_head *hashent)
{
struct ucounts *ucounts;
+ struct hlist_nulls_node *pos;
- hlist_for_each_entry(ucounts, hashent, node) {
- if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
- return ucounts;
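+ /*
+  * Lockless lookup: walk the bucket under RCU and only return an
+  * entry whose refcount can still be raised from a non-zero value.
+  */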
+ guard(rcu)();
+ hlist_nulls_for_each_entry_rcu(ucounts, pos, hashent, node) {
+ if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns)) {
+ if (atomic_inc_not_zero(&ucounts->count))
+ return ucounts;
+ }
}
return NULL;
}
static void hlist_add_ucounts(struct ucounts *ucounts)
{
- struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
+ struct hlist_nulls_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
+
spin_lock_irq(&ucounts_lock);
- hlist_add_head(&ucounts->node, hashent);
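+ /* Publish with the RCU helper; lockless readers may see it immediately. */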
+ hlist_nulls_add_head_rcu(&ucounts->node, hashent);
spin_unlock_irq(&ucounts_lock);
}
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
- struct hlist_head *hashent = ucounts_hashentry(ns, uid);
- struct ucounts *ucounts, *new = NULL;
+ struct hlist_nulls_head *hashent = ucounts_hashentry(ns, uid);
+ struct ucounts *ucounts, *new;
+
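+ /* Fast path: find_ucounts() takes its own reference, no lock needed. */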
+ ucounts = find_ucounts(ns, uid, hashent);
+ if (ucounts)
+ return ucounts;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ new->ns = ns;
+ new->uid = uid;
+ atomic_set(&new->count, 1);
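+
+ /* Serialize with concurrent allocators and recheck the bucket. */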
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
- if (!ucounts) {
+ if (ucounts) {
spin_unlock_irq(&ucounts_lock);
-
- new = kzalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- return NULL;
-
- new->ns = ns;
- new->uid = uid;
- atomic_set(&new->count, 1);
-
- spin_lock_irq(&ucounts_lock);
- ucounts = find_ucounts(ns, uid, hashent);
- if (!ucounts) {
- hlist_add_head(&new->node, hashent);
- get_user_ns(new->ns);
- spin_unlock_irq(&ucounts_lock);
- return new;
- }
+ kfree(new);
+ return ucounts;
}
- if (!atomic_inc_not_zero(&ucounts->count))
- ucounts = NULL;
- spin_unlock_irq(&ucounts_lock);
- kfree(new);
- return ucounts;
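+ /* Still absent: publish the new entry while holding ucounts_lock. */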
+ hlist_nulls_add_head_rcu(&new->node, hashent);
+ get_user_ns(new->ns);
+ spin_unlock_irq(&ucounts_lock);
+ return new;
}
void put_ucounts(struct ucounts *ucounts)
{
	unsigned long flags;

if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
- hlist_del_init(&ucounts->node);
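+ /* RCU-aware unlink: readers already walking the chain remain safe. */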
+ hlist_nulls_del_rcu(&ucounts->node);
spin_unlock_irqrestore(&ucounts_lock, flags);
+
put_user_ns(ucounts->ns);
- kfree(ucounts);
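+ /* Defer freeing for a grace period so RCU walkers never touch freed memory. */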
+ kfree_rcu(ucounts, rcu);
}
}