	__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
}
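+/*
+ * @p has just been enqueued on @dsq, a local DSQ. If the enqueue requested
+ * preemption and the owning rq's current task is an SCX task, expire its
+ * slice. Reschedule the rq if preemption was requested or if SCX outranks
+ * the current task's sched class.
+ */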
+static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p,
+			       u64 enq_flags)
+{
+	struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+	bool preempt = false;
+
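+	/*
+	 * If preemption was requested and the CPU is currently running
+	 * another SCX task, expire that task's slice so the resched below
+	 * preempts it.
+	 */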
+	if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+	    rq->curr->sched_class == &ext_sched_class) {
+		rq->curr->scx.slice = 0;
+		preempt = true;
+	}
+
+	if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
+		resched_curr(rq);
+}
+
static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
			     struct task_struct *p, u64 enq_flags)
{
	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);

-	if (is_local) {
-		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
-		bool preempt = false;
-
-		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
-		    rq->curr->sched_class == &ext_sched_class) {
-			rq->curr->scx.slice = 0;
-			preempt = true;
-		}
-
-		if (preempt || sched_class_above(&ext_sched_class,
-						  rq->curr->sched_class))
-			resched_curr(rq);
-	} else {
+	if (is_local)
+		local_dsq_post_enq(dsq, p, enq_flags);
+	else
		raw_spin_unlock(&dsq->lock);
-	}
}

static void task_unlink_from_dsq(struct task_struct *p,