From: Chuyi Zhou
Date: Wed, 17 Jul 2024 14:33:42 +0000 (+0800)
Subject: sched/fair: Remove cfs_rq::nr_spread_over and cfs_rq::exec_clock
X-Git-Tag: v6.12-rc1~120^2~61
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=2c2d9624697fc5e7dd84490ae01b80cc43ec2def;p=thirdparty%2Flinux.git

sched/fair: Remove cfs_rq::nr_spread_over and cfs_rq::exec_clock

nr_spread_over tracks the number of instances where the difference between
a scheduling entity's virtual runtime and the minimum virtual runtime in
the runqueue exceeds three times the scheduler latency, indicating
significant disparity in task scheduling.
Commit that removed its usage: 5e963f2bd: sched/fair: Commit to EEVDF

cfs_rq->exec_clock was used to account for time spent executing tasks.
Commit that removed its usage: 5d69eca542ee1 sched: Unify runtime
accounting across classes

cfs_rq::nr_spread_over and cfs_rq::exec_clock are not used anymore in
eevdf. Remove them from struct cfs_rq.

Signed-off-by: Chuyi Zhou
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Chengming Zhou
Reviewed-by: K Prateek Nayak
Acked-by: Vishal Chourasia
Link: https://lore.kernel.org/r/20240717143342.593262-1-zhouchuyi@bytedance.com
---

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index c1eb9a1afd13e..90c4a9998377c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -641,8 +641,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "\n");
 	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
 #endif
-	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
-			SPLIT_NS(cfs_rq->exec_clock));
 
 	raw_spin_rq_lock_irqsave(rq, flags);
 	root = __pick_root_entity(cfs_rq);
@@ -669,8 +667,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			SPLIT_NS(right_vruntime));
 	spread = right_vruntime - left_vruntime;
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
-	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
-			cfs_rq->nr_spread_over);
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4c36cc6803617..8a071022bdec8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -599,7 +599,6 @@ struct cfs_rq {
 	s64			avg_vruntime;
 	u64			avg_load;
 
-	u64			exec_clock;
 	u64			min_vruntime;
 #ifdef CONFIG_SCHED_CORE
 	unsigned int		forceidle_seq;
@@ -619,10 +618,6 @@ struct cfs_rq {
 	struct sched_entity	*curr;
 	struct sched_entity	*next;
 
-#ifdef CONFIG_SCHED_DEBUG
-	unsigned int		nr_spread_over;
-#endif
-
 #ifdef CONFIG_SMP
 	/*
 	 * CFS load tracking
@@ -1158,7 +1153,6 @@ struct rq {
 	/* latency stats */
 	struct sched_info	rq_sched_info;
 	unsigned long long	rq_cpu_time;
-	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
 	/* sys_sched_yield() stats */
 	unsigned int		yld_count;
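
For context on the "three times the scheduler latency" condition described above: the
counter was bumped by the old CFS check_spread() helper in kernel/sched/fair.c, which
the EEVDF switch (5e963f2bd) deleted. The sketch below is a reconstruction for
illustration only, not part of this patch, and may differ in minor details from the
removed code.

/*
 * Reconstructed sketch of the pre-EEVDF CFS spread check (for reference
 * only; removed by 5e963f2bd "sched/fair: Commit to EEVDF").
 */
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	/* How far this entity's vruntime has drifted from the queue minimum. */
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	/* Count it as "spread over" once the drift exceeds 3x sched latency. */
	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq->nr_spread_over);
#endif
}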