#include <linux/seqlock.h>
#include <linux/percpu_counter.h>
#include <linux/types.h>
+#include <linux/rseq_types.h>
#include <linux/bitmap.h>
#include <asm/mmu.h>
#define vma_policy(vma) NULL
#endif
-struct mm_cid {
- unsigned int cid;
-};
-
/*
* Opaque type representing current mm_struct flag state. Must be accessed via
* mm_flags_xxx() helper functions.
*/
atomic_t mm_users;
-#ifdef CONFIG_SCHED_MM_CID
- /**
- * @pcpu_cid: Per-cpu current cid.
- *
- * Keep track of the currently allocated mm_cid for each cpu.
- * The per-cpu mm_cid values are serialized by their respective
- * runqueue locks.
- */
- struct mm_cid __percpu *pcpu_cid;
- /**
- * @nr_cpus_allowed: Number of CPUs allowed for mm.
- *
- * Number of CPUs allowed in the union of all mm's
- * threads allowed CPUs.
- */
- unsigned int nr_cpus_allowed;
- /**
- * @cpus_allowed_lock: Lock protecting mm cpus_allowed.
- *
- * Provide mutual exclusion for mm cpus_allowed and
- * mm nr_cpus_allowed updates.
- */
- raw_spinlock_t cpus_allowed_lock;
-#endif
+ /* MM CID related storage */
+ struct mm_mm_cid mm_cid;
+
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* size of all page tables */
#endif
}
#ifdef CONFIG_SCHED_MM_CID
-
-#define MM_CID_UNSET (~0U)
-
/*
* mm_cpus_allowed: Union of all mm's threads allowed CPUs.
*/
int i;
for_each_possible_cpu(i) {
- struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
+ struct mm_cid_pcpu *pcpu = per_cpu_ptr(mm->mm_cid.pcpu, i);
- pcpu_cid->cid = MM_CID_UNSET;
+ pcpu->cid = MM_CID_UNSET;
}
- mm->nr_cpus_allowed = p->nr_cpus_allowed;
- raw_spin_lock_init(&mm->cpus_allowed_lock);
+ mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
+ raw_spin_lock_init(&mm->mm_cid.lock);
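+ /* Seed the allowed CPUs from the forking task and start with an empty cidmask */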
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
cpumask_clear(mm_cidmask(mm));
}
static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
{
- mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
- if (!mm->pcpu_cid)
+ mm->mm_cid.pcpu = alloc_percpu_noprof(struct mm_cid_pcpu);
+ if (!mm->mm_cid.pcpu)
return -ENOMEM;
mm_init_cid(mm, p);
return 0;
static inline void mm_destroy_cid(struct mm_struct *mm)
{
- free_percpu(mm->pcpu_cid);
- mm->pcpu_cid = NULL;
+ free_percpu(mm->mm_cid.pcpu);
+ mm->mm_cid.pcpu = NULL;
}
static inline unsigned int mm_cid_size(void)
if (!mm)
return;
/* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
- raw_spin_lock(&mm->cpus_allowed_lock);
+ guard(raw_spinlock)(&mm->mm_cid.lock);
cpumask_or(mm_allowed, mm_allowed, cpumask);
- WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
- raw_spin_unlock(&mm->cpus_allowed_lock);
+ WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed));
}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
struct rseq_data { };
#endif /* !CONFIG_RSEQ */
+#ifdef CONFIG_SCHED_MM_CID
+
+#define MM_CID_UNSET (~0U)
+
+/**
+ * struct sched_mm_cid - Storage for per-task MM CID data
+ * @active: Whether MM CID is active for the task
+ * @cid: The CID associated with the task
+ * @last_cid: The last CID associated with the task
+ */
+struct sched_mm_cid {
+ unsigned int active;
+ unsigned int cid;
+ unsigned int last_cid;
+};
+
+/**
+ * struct mm_cid_pcpu - Storage for per-CPU MM CID data
+ * @cid: The CID associated with the CPU
+ */
+struct mm_cid_pcpu {
+ unsigned int cid;
+};
+
+/**
+ * struct mm_mm_cid - Storage for per-MM CID data
+ * @pcpu: Per-CPU storage for the CID associated with each CPU
+ * @nr_cpus_allowed: The number of CPUs in the per-MM allowed CPUs map. The map
+ * only grows.
+ * @lock: Spinlock to protect all fields except @pcpu. It also protects
+ * the MM CID cpumask and the MM cidmask bitmap.
+ */
+struct mm_mm_cid {
+ struct mm_cid_pcpu __percpu *pcpu;
+ unsigned int nr_cpus_allowed;
+ raw_spinlock_t lock;
+};
+#else /* CONFIG_SCHED_MM_CID */
+struct mm_mm_cid { };
+struct sched_mm_cid { };
+#endif /* !CONFIG_SCHED_MM_CID */
+
#endif
#endif /* CONFIG_NUMA_BALANCING */
struct rseq_data rseq;
-
-#ifdef CONFIG_SCHED_MM_CID
- int mm_cid; /* Current cid in mm */
- int last_mm_cid; /* Most recent cid in mm */
- int migrate_from_cpu;
- int mm_cid_active; /* Whether cid bitmap is active */
- struct callback_head cid_work;
-#endif
+ struct sched_mm_cid mm_cid;
struct tlbflush_unmap_batch tlb_ubc;
void sched_mm_cid_exit_signals(struct task_struct *t);
static inline int task_mm_cid(struct task_struct *t)
{
- return t->mm_cid;
+ return t->mm_cid.cid;
}
#else
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
#ifdef CONFIG_SECCOMP_FILTER
.seccomp = { .filter_count = ATOMIC_INIT(0) },
#endif
+#ifdef CONFIG_SCHED_MM_CID
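+ /* The boot task starts without a CID assigned */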
+ .mm_cid = { .cid = MM_CID_UNSET, },
+#endif
};
EXPORT_SYMBOL(init_task);
#endif
#ifdef CONFIG_SCHED_MM_CID
- tsk->mm_cid = MM_CID_UNSET;
- tsk->last_mm_cid = MM_CID_UNSET;
- tsk->mm_cid_active = 0;
+ tsk->mm_cid.cid = MM_CID_UNSET;
+ tsk->mm_cid.last_cid = MM_CID_UNSET;
+ tsk->mm_cid.active = 0;
#endif
return tsk;
{
struct mm_struct *mm = t->mm;
- if (!mm || !t->mm_cid_active)
+ if (!mm || !t->mm_cid.active)
return;
guard(preempt)();
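+ /* Deactivate MM CID and return the task's CID to the mm's cidmask */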
- t->mm_cid_active = 0;
- if (t->mm_cid != MM_CID_UNSET) {
- cpumask_clear_cpu(t->mm_cid, mm_cidmask(mm));
- t->mm_cid = MM_CID_UNSET;
+ t->mm_cid.active = 0;
+ if (t->mm_cid.cid != MM_CID_UNSET) {
+ cpumask_clear_cpu(t->mm_cid.cid, mm_cidmask(mm));
+ t->mm_cid.cid = MM_CID_UNSET;
}
}
return;
guard(preempt)();
- t->mm_cid_active = 1;
+ t->mm_cid.active = 1;
mm_cid_select(t);
}
void sched_mm_cid_fork(struct task_struct *t)
{
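+ /* A forked task must not have a CID yet; activate MM CID so one gets selected */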
- WARN_ON_ONCE(!t->mm || t->mm_cid != MM_CID_UNSET);
- t->mm_cid_active = 1;
+ WARN_ON_ONCE(!t->mm || t->mm_cid.cid != MM_CID_UNSET);
+ t->mm_cid.active = 1;
}
#endif /* CONFIG_SCHED_MM_CID */
return;
- /* Preset last_mm_cid */
+ /* Preset mm_cid.last_cid */
- max_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed), atomic_read(&mm->mm_users));
- t->last_mm_cid = max_cid - 1;
+ max_cid = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));
+ t->mm_cid.last_cid = max_cid - 1;
}
static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigned int max_cids)
return false;
if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm)))
return false;
- t->mm_cid = t->last_mm_cid = cid;
- __this_cpu_write(mm->pcpu_cid->cid, cid);
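+ /* Store the CID in the task and cache it per CPU for later reuse */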
+ t->mm_cid.cid = t->mm_cid.last_cid = cid;
+ __this_cpu_write(mm->mm_cid.pcpu->cid, cid);
return true;
}
struct mm_struct *mm = t->mm;
unsigned int max_cids;
- max_cids = min_t(int, READ_ONCE(mm->nr_cpus_allowed), atomic_read(&mm->mm_users));
+ max_cids = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));
/* Try to reuse the last CID of this task */
- if (__mm_cid_get(t, t->last_mm_cid, max_cids))
+ if (__mm_cid_get(t, t->mm_cid.last_cid, max_cids))
return true;
/* Try to reuse the last CID of this mm on this CPU */
- if (__mm_cid_get(t, __this_cpu_read(mm->pcpu_cid->cid), max_cids))
+ if (__mm_cid_get(t, __this_cpu_read(mm->mm_cid.pcpu->cid), max_cids))
return true;
/* Try the first zero bit in the cidmask. */
static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
{
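+ /* Release the CID of the outgoing task so it can be reused */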
- if (prev->mm_cid_active) {
- if (prev->mm_cid != MM_CID_UNSET)
- cpumask_clear_cpu(prev->mm_cid, mm_cidmask(prev->mm));
- prev->mm_cid = MM_CID_UNSET;
+ if (prev->mm_cid.active) {
+ if (prev->mm_cid.cid != MM_CID_UNSET)
+ cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm));
+ prev->mm_cid.cid = MM_CID_UNSET;
}
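+ /* Select a CID for the incoming task and pass it to rseq */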
- if (next->mm_cid_active) {
+ if (next->mm_cid.active) {
mm_cid_select(next);
- rseq_sched_set_task_mm_cid(next, next->mm_cid);
+ rseq_sched_set_task_mm_cid(next, next->mm_cid.cid);
}
}