return READ_ONCE(tsk->rmid) == rmid;
}
-static inline void resctrl_sched_in(struct task_struct *tsk)
+static inline void resctrl_arch_sched_in(struct task_struct *tsk)
{
	if (static_branch_likely(&rdt_enable_key))
		__resctrl_sched_in(tsk);
}
#else
-static inline void resctrl_sched_in(struct task_struct *tsk) {}
+static inline void resctrl_arch_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}
#endif /* CONFIG_X86_CPU_RESCTRL */
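The wrapper keeps the context-switch hot path cheap: when resctrl is unused, the static branch is patched out and __resctrl_sched_in() is never called. As a simplified illustration of what the callee does on x86 (a sketch only; the real __resctrl_sched_in() in arch/x86/kernel/cpu/resctrl/rdtgroup.c also consults the per-CPU pqr_state defaults and avoids redundant MSR writes):

/* Simplified sketch for illustration; not the upstream body. */
void __resctrl_sched_in(struct task_struct *tsk)
{
	u32 closid = READ_ONCE(tsk->closid);
	u32 rmid = READ_ONCE(tsk->rmid);

	/* IA32_PQR_ASSOC takes the RMID in the low half, the CLOSID in the high. */
	wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
}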
}
/*
- * This is safe against resctrl_sched_in() called from __switch_to()
+ * This is safe against resctrl_arch_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
* from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
/*
 * We cannot unconditionally write the MSR because the current
 * executing task might have its own closid selected. Just reuse
 * the context switch code.
 */
-	resctrl_sched_in(current);
+	resctrl_arch_sched_in(current);
}
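For context, update_cpu_closid_rmid() above is driven via IPI on every affected CPU; in recent kernels the caller reads roughly as below (condensed from update_closid_rmid() in rdtgroup.c):

static void update_closid_rmid(const struct cpumask *cpu_mask,
			       struct rdtgroup *r)
{
	/*
	 * on_each_cpu_mask() also runs the function on the local CPU,
	 * with preemption disabled, when it is in @cpu_mask - the
	 * "local call" case the comment above refers to.
	 */
	on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);
}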
/*
 * If the task is still current on this CPU, update PQR_ASSOC MSR.
 * Otherwise, the MSR is updated when the task is scheduled in.
*/
	if (task == current)
-		resctrl_sched_in(task);
+		resctrl_arch_sched_in(task);
}
static void update_task_closid_rmid(struct task_struct *t)
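Only the signature of update_task_closid_rmid() appears in this hunk; its body (a sketch, lightly condensed from rdtgroup.c) explains the task == current test above: a task running on another CPU gets the update via IPI, while a task that is not running simply picks the new values up at its next context switch.

static void update_task_closid_rmid(struct task_struct *t)
{
	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
		smp_call_function_single(task_cpu(t),
					 _update_task_closid_rmid, t, 1);
	else
		_update_task_closid_rmid(t);
}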
/*
 * Ensure the task's closid and rmid are written before determining if
 * the task is current, which decides whether it will be interrupted.
* This pairs with the full barrier between the rq->curr update and
- * resctrl_sched_in() during context switch.
+ * resctrl_arch_sched_in() during context switch.
*/
	smp_mb();
/*
* Order the closid/rmid stores above before the loads
* in task_curr(). This pairs with the full barrier
- * between the rq->curr update and resctrl_sched_in()
- * during context switch.
+ * between the rq->curr update and
+ * resctrl_arch_sched_in() during context switch.
*/
	smp_mb();
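Both smp_mb() sites above rely on the same pairing; schematically (illustrative, not part of the patch):

/*
 * Group-move side:                      Context-switch side:
 *
 *   WRITE_ONCE(t->closid, ...);           rq->curr = next;
 *   WRITE_ONCE(t->rmid, ...);             ...full barrier...
 *   smp_mb();                             resctrl_arch_sched_in(next);
 *   if (task_curr(t))                       reads next->closid/rmid
 *           send IPI to t's CPU;
 *
 * Either the group-move side observes the task running and interrupts it,
 * or the switching CPU observes the new closid/rmid; the barriers exclude
 * the interleaving where each side misses the other's store.
 */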