}
/*
 * Accessor for struct mm_struct's cidmask.
 *
 * The concurrency-id bitmap is stored immediately after the
 * mm_cpus_allowed() cpumask within the same allocation, so locate it
 * by stepping one cpumask_size() past that mask.
 */
static inline unsigned long *mm_cidmask(struct mm_struct *mm)
{
	char *base = (char *)mm_cpus_allowed(mm);

	/* Skip mm_cpus_allowed */
	return (unsigned long *)(base + cpumask_size());
}
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
raw_spin_lock_init(&mm->mm_cid.lock);
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
- cpumask_clear(mm_cidmask(mm));
+ bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}
static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
/*
 * Total per-mm storage needed for the CID bookkeeping: the
 * mm_cpus_allowed() cpumask followed by the mm_cidmask() bitmap
 * sized for every possible CPU.
 */
static inline unsigned int mm_cid_size(void)
{
	unsigned int allowed_sz = cpumask_size();               /* mm_cpus_allowed() */
	unsigned int cidmask_sz = bitmap_size(num_possible_cpus()); /* mm_cidmask() */

	return allowed_sz + cidmask_sz;
}
#else /* CONFIG_SCHED_MM_CID */
guard(preempt)();
t->mm_cid.active = 0;
if (t->mm_cid.cid != MM_CID_UNSET) {
- cpumask_clear_cpu(t->mm_cid.cid, mm_cidmask(mm));
+ clear_bit(t->mm_cid.cid, mm_cidmask(mm));
t->mm_cid.cid = MM_CID_UNSET;
}
}
if (cid >= max_cids)
return false;
- if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm)))
+ if (test_and_set_bit(cid, mm_cidmask(mm)))
return false;
t->mm_cid.cid = t->mm_cid.last_cid = cid;
__this_cpu_write(mm->mm_cid.pcpu->cid, cid);
return true;
/* Try the first zero bit in the cidmask. */
- return __mm_cid_get(t, cpumask_first_zero(mm_cidmask(mm)), max_cids);
+ return __mm_cid_get(t, find_first_zero_bit(mm_cidmask(mm), num_possible_cpus()), max_cids);
}
static inline void mm_cid_select(struct task_struct *t)
{
if (prev->mm_cid.active) {
if (prev->mm_cid.cid != MM_CID_UNSET)
- cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm));
+ clear_bit(prev->mm_cid.cid, mm_cidmask(prev->mm));
prev->mm_cid.cid = MM_CID_UNSET;
}