return (unsigned long *)cid_bitmap;
}
-static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
-{
- int i;
-
- for_each_possible_cpu(i) {
- struct mm_cid_pcpu *pcpu = per_cpu_ptr(mm->mm_cid.pcpu, i);
-
- pcpu->cid = MM_CID_UNSET;
- }
- mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
- raw_spin_lock_init(&mm->mm_cid.lock);
- cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
- bitmap_zero(mm_cidmask(mm), num_possible_cpus());
-}
+void mm_init_cid(struct mm_struct *mm, struct task_struct *p);
static inline void sched_mm_cid_fork(struct task_struct *t)
{
WARN_ON_ONCE(!t->mm || t->mm_cid.cid != MM_CID_UNSET);
t->mm_cid.active = 1;
}
+
+void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
+{
+ struct mm_cid_pcpu __percpu *pcpu = mm->mm_cid.pcpu;
+ int cpu;
+
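+ /* Mark the concurrency ID of every possible CPU as unset */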
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(pcpu, cpu)->cid = MM_CID_UNSET;
+
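+ /* Inherit the number of allowed CPUs from the forking task */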
+ mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
+ raw_spin_lock_init(&mm->mm_cid.lock);
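+ /* The mm-wide allowed-CPUs mask starts as a copy of the task's cpumask */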
+ cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
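+ /* No concurrency IDs are handed out yet */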
+ bitmap_zero(mm_cidmask(mm), num_possible_cpus());
+}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { }
#endif /* !CONFIG_SCHED_MM_CID */
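
For reference, a sketch of the allocation path that ends up calling the now out-of-line mm_init_cid(). The body of mm_alloc_cid_noprof() below is not part of this hunk; it is an assumption reconstructed from the _noprof percpu allocation convention and the mm->mm_cid.pcpu / struct mm_cid_pcpu usage visible above:

	static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
	{
		/* Assumed: per-CPU CID state is allocated via the _noprof percpu API */
		mm->mm_cid.pcpu = alloc_percpu_noprof(struct mm_cid_pcpu);
		if (!mm->mm_cid.pcpu)
			return -ENOMEM;
		/* Initialize the freshly allocated state; out of line after this patch */
		mm_init_cid(mm, p);
		return 0;
	}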