sched/mmcid: Move initialization out of line
author     Thomas Gleixner <tglx@linutronix.de>
           Wed, 19 Nov 2025 17:27:07 +0000 (18:27 +0100)
committer  Thomas Gleixner <tglx@linutronix.de>
           Tue, 25 Nov 2025 18:45:40 +0000 (19:45 +0100)
mm_init_cid() is going to grow soon, so just move it out of line next to
the rest of the MM CID code in kernel/sched/core.c.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251119172549.769636491@linutronix.de
include/linux/mm_types.h
kernel/sched/core.c
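
For context: mm_init_cid() resets every possible CPU's per-mm concurrency ID
slot to MM_CID_UNSET, initializes the mm_cid lock, records the task's
allowed-CPU count and mask, and clears the CID bitmap of a freshly created mm.
A minimal sketch of how the function is reached, based on the
mm_alloc_cid_noprof() context visible in the first hunk below; the allocation
call and error path are assumptions added for illustration, not part of this
patch:

	/* Sketch only: the call site of mm_init_cid() after this change.
	 * The alloc_percpu_noprof() allocation and -ENOMEM handling are
	 * assumed here, not taken from the patch. */
	static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
	{
		mm->mm_cid.pcpu = alloc_percpu_noprof(struct mm_cid_pcpu);
		if (!mm->mm_cid.pcpu)
			return -ENOMEM;

		/* Previously expanded inline from mm_types.h; now an
		 * out-of-line call into kernel/sched/core.c. */
		mm_init_cid(mm, p);
		return 0;
	}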

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bafb81b3392219eb974fc522fb56d946e5295961..3b7d05e7169c7e0590b8ed524a2a3f0c8039e42a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1351,20 +1351,7 @@ static inline unsigned long *mm_cidmask(struct mm_struct *mm)
        return (unsigned long *)cid_bitmap;
 }
 
-static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
-{
-       int i;
-
-       for_each_possible_cpu(i) {
-               struct mm_cid_pcpu *pcpu = per_cpu_ptr(mm->mm_cid.pcpu, i);
-
-               pcpu->cid = MM_CID_UNSET;
-       }
-       mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
-       raw_spin_lock_init(&mm->mm_cid.lock);
-       cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
-       bitmap_zero(mm_cidmask(mm), num_possible_cpus());
-}
+void mm_init_cid(struct mm_struct *mm, struct task_struct *p);
 
 static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3fdf90a7074d80828e5f9d93ddde98c80dfaacdd..34b6c31eca3a4f243165a2c42e7961ec99ad3144 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10431,6 +10431,20 @@ void sched_mm_cid_fork(struct task_struct *t)
        WARN_ON_ONCE(!t->mm || t->mm_cid.cid != MM_CID_UNSET);
        t->mm_cid.active = 1;
 }
+
+void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
+{
+       struct mm_cid_pcpu __percpu *pcpu = mm->mm_cid.pcpu;
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               per_cpu_ptr(pcpu, cpu)->cid = MM_CID_UNSET;
+
+       mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
+       raw_spin_lock_init(&mm->mm_cid.lock);
+       cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
+       bitmap_zero(mm_cidmask(mm), num_possible_cpus());
+}
 #else /* CONFIG_SCHED_MM_CID */
 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { }
 #endif /* !CONFIG_SCHED_MM_CID */