static_branch_disable(&scx_ops_enq_exiting);
static_branch_disable(&scx_ops_enq_migration_disabled);
static_branch_disable(&scx_ops_cpu_preempt);
- static_branch_disable(&scx_builtin_idle_enabled);
+ scx_idle_disable();
synchronize_rcu();
if (ei->kind >= SCX_EXIT_ERROR) {
if (scx_ops.cpu_acquire || scx_ops.cpu_release)
static_branch_enable(&scx_ops_cpu_preempt);
- if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
- scx_idle_reset_masks();
- static_branch_enable(&scx_builtin_idle_enabled);
- } else {
- static_branch_disable(&scx_builtin_idle_enabled);
- }
+ scx_idle_enable(ops);
/*
* Lock out forks, cgroup on/offlining and moves before opening the
#include "ext_idle.h"
/* Enable/disable built-in idle CPU selection policy */
-DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
#ifdef CONFIG_SMP
#ifdef CONFIG_CPUMASK_OFFSTACK
#endif
/* Enable/disable LLC aware optimizations */
-DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
/* Enable/disable NUMA aware optimizations */
-DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
static struct {
cpumask_var_t cpu;
return cpu;
}
-void scx_idle_reset_masks(void)
-{
- /*
- * Consider all online cpus idle. Should converge to the actual state
- * quickly.
- */
- cpumask_copy(idle_masks.cpu, cpu_online_mask);
- cpumask_copy(idle_masks.smt, cpu_online_mask);
-}
-
void scx_idle_init_masks(void)
{
BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
}
#endif /* CONFIG_SMP */
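+/*
+ * Set up built-in idle CPU tracking for the loading BPF scheduler: keep the
+ * built-in policy active unless ops.update_idle() is implemented without
+ * SCX_OPS_KEEP_BUILTIN_IDLE, in which case idle tracking is delegated to the
+ * BPF scheduler.
+ */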
+void scx_idle_enable(struct sched_ext_ops *ops)
+{
+ if (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
+ static_branch_disable(&scx_builtin_idle_enabled);
+ return;
+ }
+ static_branch_enable(&scx_builtin_idle_enabled);
+
+#ifdef CONFIG_SMP
+ /*
+ * Consider all online cpus idle. Should converge to the actual state
+ * quickly.
+ */
+ cpumask_copy(idle_masks.cpu, cpu_online_mask);
+ cpumask_copy(idle_masks.smt, cpu_online_mask);
+#endif
+}
+
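+/*
+ * Disable built-in idle CPU tracking; called from the scheduler disable path.
+ */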
+void scx_idle_disable(void)
+{
+ static_branch_disable(&scx_builtin_idle_enabled);
+}
+
/********************************************************************************
* Helpers that can be called from the BPF scheduler.
*/
#ifndef _KERNEL_SCHED_EXT_IDLE_H
#define _KERNEL_SCHED_EXT_IDLE_H
-extern struct static_key_false scx_builtin_idle_enabled;
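+/* Forward declaration for the scx_idle_enable() prototype below. */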
+struct sched_ext_ops;
#ifdef CONFIG_SMP
-extern struct static_key_false scx_selcpu_topo_llc;
-extern struct static_key_false scx_selcpu_topo_numa;
-
void scx_idle_update_selcpu_topology(void);
-void scx_idle_reset_masks(void);
void scx_idle_init_masks(void);
bool scx_idle_test_and_clear_cpu(int cpu);
s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags);
#else /* !CONFIG_SMP */
static inline void scx_idle_update_selcpu_topology(void) {}
-static inline void scx_idle_reset_masks(void) {}
static inline void scx_idle_init_masks(void) {}
static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
#endif /* CONFIG_SMP */
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *found);
-
-extern int scx_idle_init(void);
+void scx_idle_enable(struct sched_ext_ops *ops);
+void scx_idle_disable(void);
+int scx_idle_init(void);
#endif /* _KERNEL_SCHED_EXT_IDLE_H */