From: Vincent Guittot <vincent.guittot@linaro.org>
Date: Mon, 2 Dec 2024 17:46:03 +0000 (+0100)
Subject: sched/fair: Remove unused cfs_rq.idle_nr_running
X-Git-Tag: v6.14-rc1~184^2~31
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=43eef7c3a4a65e258244d63a8992d0a8d70e5974;p=thirdparty%2Fkernel%2Flinux.git

sched/fair: Remove unused cfs_rq.idle_nr_running

The cfs_rq.idle_nr_running field is not used anywhere, so remove the
now-useless accounting that maintains it. Its last user went away in
commit 5e963f2bd465 ("sched/fair: Commit to EEVDF").

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://lore.kernel.org/r/20241202174606.4074512-9-vincent.guittot@linaro.org
---

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e21b66b6ee10b..e300ee4d79566 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -846,8 +846,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
-	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
-			cfs_rq->idle_nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2ef33784cbf50..8afa0a4ed09f6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3674,8 +3674,6 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	}
 #endif
 	cfs_rq->nr_running++;
-	if (se_is_idle(se))
-		cfs_rq->idle_nr_running++;
 }
 
 static void
@@ -3689,8 +3687,6 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	}
 #endif
 	cfs_rq->nr_running--;
-	if (se_is_idle(se))
-		cfs_rq->idle_nr_running--;
 }
 
 /*
@@ -13507,7 +13503,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 	for_each_possible_cpu(i) {
 		struct rq *rq = cpu_rq(i);
 		struct sched_entity *se = tg->se[i];
-		struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
+		struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
 		bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
 		long idle_task_delta;
 		struct rq_flags rf;
@@ -13518,14 +13514,6 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 		if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
 			goto next_cpu;
 
-		if (se->on_rq) {
-			parent_cfs_rq = cfs_rq_of(se);
-			if (cfs_rq_is_idle(grp_cfs_rq))
-				parent_cfs_rq->idle_nr_running++;
-			else
-				parent_cfs_rq->idle_nr_running--;
-		}
-
 		idle_task_delta = grp_cfs_rq->h_nr_queued -
 				  grp_cfs_rq->h_nr_idle;
 		if (!cfs_rq_is_idle(grp_cfs_rq))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index afe5cb93db896..9a9220aad9fc4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -648,7 +648,6 @@ struct cfs_rq {
 	unsigned int		nr_running;
 	unsigned int		h_nr_queued;       /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		h_nr_runnable;     /* SCHED_{NORMAL,BATCH,IDLE} */
-	unsigned int		idle_nr_running;   /* SCHED_IDLE */
 	unsigned int		h_nr_idle;         /* SCHED_IDLE */
 
 	s64			avg_vruntime;