* do not actually share the MM.
* @lock: Spinlock to protect all fields except @pcpu. It also protects
* the MM cid cpumask and the MM cidmask bitmap.
+ * @mutex: Mutex to serialize forks and exits related to this mm
*/
struct mm_mm_cid {
struct mm_cid_pcpu __percpu *pcpu;
unsigned int nr_cpus_allowed;
unsigned int users;
raw_spinlock_t lock;
+ struct mutex mutex;
} ____cacheline_aligned_in_smp;
#else /* CONFIG_SCHED_MM_CID */
struct mm_mm_cid { };
#endif /* CONFIG_SCHED_MM_CID */
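The per CPU storage behind @pcpu only appears as an opaque pointer in this hunk. As a rough, hypothetical sketch of how it could be set up (the actual allocation site, error handling and the layout of struct mm_cid_pcpu are not part of this excerpt):

/*
 * Hypothetical sketch only: the real allocation site of the per CPU
 * storage is not shown in this excerpt.
 */
static int mm_cid_alloc_pcpu(struct mm_struct *mm)
{
	mm->mm_cid.pcpu = alloc_percpu(struct mm_cid_pcpu);
	if (!mm->mm_cid.pcpu)
		return -ENOMEM;
	return 0;
}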
#ifdef CONFIG_SCHED_MM_CID
+/*
+ * Concurrency IDentifier management
+ *
+ * Serialization rules:
+ *
+ * mm::mm_cid::mutex: Serializes fork() and exit() and therefore
+ * protects mm::mm_cid::users.
+ *
+ * mm::mm_cid::lock: Serializes mm_update_max_cids() and
+ * mm_update_cpus_allowed(). Nests inside
+ * mm::mm_cid::mutex and the runqueue lock.
+ *
+ * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
+ * and can only be modified with atomic operations.
+ *
+ * The mm::mm_cid::pcpu per CPU storage is protected by the CPU's runqueue
+ * lock.
+ */
+
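To illustrate the rule that the mm_cidmask() bitmap is only manipulated with atomic bit operations (no mm::mm_cid lock held), a CID allocation could look roughly like the sketch below. The function name mm_cid_try_alloc() and the max_cids argument are assumptions for illustration, not part of this patch:

/* Hypothetical sketch: claim a free CID using only atomic bitmap operations. */
static int mm_cid_try_alloc(struct mm_struct *mm, unsigned int max_cids)
{
	unsigned int cid;

	for (cid = find_first_zero_bit(mm_cidmask(mm), max_cids);
	     cid < max_cids;
	     cid = find_next_zero_bit(mm_cidmask(mm), max_cids, cid + 1)) {
		/* test_and_set_bit() serializes against concurrent allocators. */
		if (!test_and_set_bit(cid, mm_cidmask(mm)))
			return cid;
	}
	/* No free CID in range; the patch presumably signals this via its own sentinel. */
	return -1;
}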
/*
* Update the CID range properties when the constraints change. Invoked via
* fork(), exit() and affinity changes.
*/
WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
+ guard(mutex)(&mm->mm_cid.mutex);
guard(raw_spinlock)(&mm->mm_cid.lock);
t->mm_cid.active = 1;
mm->mm_cid.users++;
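The guard() statements come from the kernel's cleanup.h infrastructure and drop their lock automatically at the end of the enclosing scope, in reverse acquisition order. Spelled out with explicit lock/unlock calls, the fork side above is roughly equivalent to the following sketch (the function name is made up for illustration):

static void mm_cid_fork_explicit(struct task_struct *t, struct mm_struct *mm)
{
	/* Lock order per the serialization rules: mutex first, then the spinlock. */
	mutex_lock(&mm->mm_cid.mutex);
	raw_spin_lock(&mm->mm_cid.lock);
	t->mm_cid.active = 1;
	mm->mm_cid.users++;
	raw_spin_unlock(&mm->mm_cid.lock);
	mutex_unlock(&mm->mm_cid.mutex);
}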
if (!mm || !t->mm_cid.active)
return;
+ guard(mutex)(&mm->mm_cid.mutex);
guard(raw_spinlock)(&mm->mm_cid.lock);
t->mm_cid.active = 0;
mm->mm_cid.users--;
mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
mm->mm_cid.users = 0;
raw_spin_lock_init(&mm->mm_cid.lock);
+ mutex_init(&mm->mm_cid.mutex);
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}
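The affinity change path named in the serialization rules, mm_update_cpus_allowed(), is not part of this excerpt. Under the documented rules it has to hold mm::mm_cid::lock while it touches the MM wide cpumask; the body below is an assumed sketch of what such an update might look like, not the patch's actual implementation:

static void mm_update_cpus_allowed_sketch(struct mm_struct *mm,
					  const struct cpumask *affmsk)
{
	guard(raw_spinlock)(&mm->mm_cid.lock);
	/* Assumption: the MM wide mask accumulates CPUs any user thread may run on. */
	cpumask_or(mm_cpus_allowed(mm), mm_cpus_allowed(mm), affmsk);
	mm->mm_cid.nr_cpus_allowed = cpumask_weight(mm_cpus_allowed(mm));
}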