struct scx_sched *sch;
scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
- struct scx_sched_pcpu *sch_pcpu =
+ struct scx_deferred_reenq_local *drl =
list_first_entry_or_null(&rq->scx.deferred_reenq_locals,
- struct scx_sched_pcpu,
- deferred_reenq_local_node);
- if (!sch_pcpu)
+ struct scx_deferred_reenq_local,
+ node);
+ struct scx_sched_pcpu *sch_pcpu;
+
+ if (!drl)
return;
+ sch_pcpu = container_of(drl, struct scx_sched_pcpu,
+ deferred_reenq_local);
sch = sch_pcpu->sch;
- list_del_init(&sch_pcpu->deferred_reenq_local_node);
+ list_del_init(&drl->node);
}
reenq_local(sch, rq);
for_each_possible_cpu(cpu) {
struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
- WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local_node));
+ WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local.node));
}
free_percpu(sch->pcpu);
struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
pcpu->sch = sch;
- INIT_LIST_HEAD(&pcpu->deferred_reenq_local_node);
+ INIT_LIST_HEAD(&pcpu->deferred_reenq_local.node);
}
sch->helper = kthread_run_worker(0, "sched_ext_helper");
scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
struct scx_sched_pcpu *pcpu = this_cpu_ptr(sch->pcpu);
- if (list_empty(&pcpu->deferred_reenq_local_node))
- list_move_tail(&pcpu->deferred_reenq_local_node,
+ if (list_empty(&pcpu->deferred_reenq_local.node))
+ list_move_tail(&pcpu->deferred_reenq_local.node,
&rq->scx.deferred_reenq_locals);
}
struct scx_dsp_buf_ent buf[];
};
+/*
+ * Embedded list node used to queue a per-CPU scheduler context onto
+ * rq->scx.deferred_reenq_locals (see the list_first_entry_or_null /
+ * list_move_tail users above); the owning scx_sched_pcpu is recovered
+ * via container_of().  NOTE(review): node appears to be protected by
+ * rq->scx.deferred_reenq_lock — confirm against the lock's documentation.
+ */
+struct scx_deferred_reenq_local {
+	struct list_head node;
+};
+
struct scx_sched_pcpu {
struct scx_sched *sch;
u64 flags; /* protected by rq lock */
*/
struct scx_event_stats event_stats;
- struct list_head deferred_reenq_local_node;
+ struct scx_deferred_reenq_local deferred_reenq_local;
struct scx_dispatch_q bypass_dsq;
#ifdef CONFIG_EXT_SUB_SCHED
u32 bypass_host_seq;