From: Thomas Gleixner Date: Wed, 19 Nov 2025 17:27:03 +0000 (+0100) Subject: sched/mmcid: Convert mm CID mask to a bitmap X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=539115f08cf850b9fdc6526b31da0839ff6c1631;p=thirdparty%2Fkernel%2Flinux.git sched/mmcid: Convert mm CID mask to a bitmap This is truly a bitmap and just conveniently uses a cpumask because the maximum size of the bitmap is nr_cpu_ids. But that prevents doing searches for a zero bit in a limited range, which is helpful to provide an efficient mechanism to consolidate the CID space when the number of users decreases. Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Reviewed-by: Mathieu Desnoyers Acked-by: Yury Norov (NVIDIA) Link: https://patch.msgid.link/20251119172549.642866767@linutronix.de --- diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 67a7bdf772f7c..bafb81b339221 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1342,13 +1342,13 @@ static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm) } /* Accessor for struct mm_struct's cidmask. 
*/ -static inline cpumask_t *mm_cidmask(struct mm_struct *mm) +static inline unsigned long *mm_cidmask(struct mm_struct *mm) { unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm); /* Skip mm_cpus_allowed */ cid_bitmap += cpumask_size(); - return (struct cpumask *)cid_bitmap; + return (unsigned long *)cid_bitmap; } static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) @@ -1363,7 +1363,7 @@ static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed; raw_spin_lock_init(&mm->mm_cid.lock); cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask); - cpumask_clear(mm_cidmask(mm)); + bitmap_zero(mm_cidmask(mm), num_possible_cpus()); } static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p) @@ -1384,7 +1384,8 @@ static inline void mm_destroy_cid(struct mm_struct *mm) static inline unsigned int mm_cid_size(void) { - return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */ + /* mm_cpus_allowed(), mm_cidmask(). 
*/ + return cpumask_size() + bitmap_size(num_possible_cpus()); } #else /* CONFIG_SCHED_MM_CID */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f6bbfa1f5c15a..9a114b6f6a6fd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10402,7 +10402,7 @@ void sched_mm_cid_exit_signals(struct task_struct *t) guard(preempt)(); t->mm_cid.active = 0; if (t->mm_cid.cid != MM_CID_UNSET) { - cpumask_clear_cpu(t->mm_cid.cid, mm_cidmask(mm)); + clear_bit(t->mm_cid.cid, mm_cidmask(mm)); t->mm_cid.cid = MM_CID_UNSET; } } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index a17f04f075e19..31f2e431db5e3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3559,7 +3559,7 @@ static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigne if (cid >= max_cids) return false; - if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm))) + if (test_and_set_bit(cid, mm_cidmask(mm))) return false; t->mm_cid.cid = t->mm_cid.last_cid = cid; __this_cpu_write(mm->mm_cid.pcpu->cid, cid); @@ -3582,7 +3582,7 @@ static inline bool mm_cid_get(struct task_struct *t) return true; /* Try the first zero bit in the cidmask. */ - return __mm_cid_get(t, cpumask_first_zero(mm_cidmask(mm)), max_cids); + return __mm_cid_get(t, find_first_zero_bit(mm_cidmask(mm), num_possible_cpus()), max_cids); } static inline void mm_cid_select(struct task_struct *t) @@ -3603,7 +3603,7 @@ static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *n { if (prev->mm_cid.active) { if (prev->mm_cid.cid != MM_CID_UNSET) - cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm)); + clear_bit(prev->mm_cid.cid, mm_cidmask(prev->mm)); prev->mm_cid.cid = MM_CID_UNSET; }