* There usually is no reason to modify these as normal scheduler operation
* shouldn't be affected by them. The knobs are primarily for debugging.
*/
-static u64 scx_slice_dfl = SCX_SLICE_DFL;
static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC;
static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US;
/*
 * Refill @p's time slice from the scheduler instance's default slice
 * length (sch->slice_dfl, updated on bypass enter/exit elsewhere in this
 * patch) and account the refill via the SCX_EV_REFILL_SLICE_DFL event
 * counter.  READ_ONCE() pairs with the WRITE_ONCE() updates of
 * sch->slice_dfl in the bypass path.
 */
static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
{
- p->scx.slice = READ_ONCE(scx_slice_dfl);
+ p->scx.slice = READ_ONCE(sch->slice_dfl);
__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
}
INIT_LIST_HEAD(&scx->runnable_node);
scx->runnable_at = jiffies;
scx->ddsp_dsq_id = SCX_DSQ_INVALID;
- scx->slice = READ_ONCE(scx_slice_dfl);
+ scx->slice = SCX_SLICE_DFL;
}
void scx_pre_fork(struct task_struct *p)
raw_spin_lock_irqsave(&bypass_lock, flags);
sch = rcu_dereference_bh(scx_root);
+ if (!sch)
+ goto unlock;
if (bypass) {
u32 intv_us;
WARN_ON_ONCE(scx_bypass_depth <= 0);
if (scx_bypass_depth != 1)
goto unlock;
- WRITE_ONCE(scx_slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC);
+ WRITE_ONCE(sch->slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC);
bypass_timestamp = ktime_get_ns();
if (sch)
scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
WARN_ON_ONCE(scx_bypass_depth < 0);
if (scx_bypass_depth != 0)
goto unlock;
- WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL);
+ WRITE_ONCE(sch->slice_dfl, SCX_SLICE_DFL);
if (sch)
scx_add_event(sch, SCX_EV_BYPASS_DURATION,
ktime_get_ns() - bypass_timestamp);
sch->ancestors[level] = sch;
sch->level = level;
+ sch->slice_dfl = SCX_SLICE_DFL;
atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
kthread_init_work(&sch->disable_work, scx_disable_workfn);
queue_flags |= DEQUEUE_CLASS;
scoped_guard (sched_change, p, queue_flags) {
- p->scx.slice = READ_ONCE(scx_slice_dfl);
+ p->scx.slice = READ_ONCE(sch->slice_dfl);
p->sched_class = new_class;
}
}