From: Peter Zijlstra
Date: Wed, 4 Oct 2023 10:43:53 +0000 (+0200)
Subject: sched/eevdf: Remove min_vruntime_copy
X-Git-Tag: v6.12-rc1~120^2~42
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=949090eaf0a3e39aa0f4a675407e16d0e975da11;p=thirdparty%2Fkernel%2Fstable.git

sched/eevdf: Remove min_vruntime_copy

Since commit e8f331bcc270 ("sched/smp: Use lag to simplify cross-runqueue
placement") the min_vruntime_copy is no longer used.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Valentin Schneider
Tested-by: Valentin Schneider
Link: https://lkml.kernel.org/r/20240727105028.395297941@infradead.org
---

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6d39a824bbe12..8201f0f4e7095 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -779,8 +779,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 	}
 
 	/* ensure we never gain time by being placed backwards. */
-	u64_u32_store(cfs_rq->min_vruntime,
-		      __update_min_vruntime(cfs_rq, vruntime));
+	cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime);
 }
 
 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
@@ -12933,7 +12932,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT_CACHED;
-	u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
+	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 #ifdef CONFIG_SMP
 	raw_spin_lock_init(&cfs_rq->removed.lock);
 #endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1e1d1b467af2c..a6d6b6f106dac 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -613,10 +613,6 @@ struct cfs_rq {
 	u64			min_vruntime_fi;
 #endif
 
-#ifndef CONFIG_64BIT
-	u64			min_vruntime_copy;
-#endif
-
 	struct rb_root_cached	tasks_timeline;
 
 	/*
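For background: min_vruntime_copy existed so that 32-bit kernels could read
the 64-bit min_vruntime without a torn read. The u64_u32_store() helper
paired each update with a barrier-ordered write to the mirror field, and
u64_u32_load() retried until the value and its copy agreed. Below is a
minimal userspace sketch of that copy/retry pattern, using C11 atomics in
place of the kernel's smp_wmb()/smp_rmb(); the struct and function names
here are illustrative, not kernel code.

/*
 * Illustration only -- not part of the commit. Models the copy/retry
 * scheme that min_vruntime_copy supported on 32-bit, where a plain
 * 64-bit store may tear: the writer mirrors the value into a copy
 * field and the reader retries until both reads agree.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct rq_sketch {
	uint64_t min_vruntime;			/* plain field; may tear on 32-bit */
	_Atomic uint64_t min_vruntime_copy;	/* mirror used to detect torn reads */
};

/* Analogue of u64_u32_store(): write the value, then the mirror (release). */
static void store_sketch(struct rq_sketch *rq, uint64_t val)
{
	rq->min_vruntime = val;
	atomic_store_explicit(&rq->min_vruntime_copy, val, memory_order_release);
}

/* Analogue of u64_u32_load(): retry until the mirror matches the value. */
static uint64_t load_sketch(struct rq_sketch *rq)
{
	uint64_t val, copy;

	do {
		copy = atomic_load_explicit(&rq->min_vruntime_copy, memory_order_acquire);
		val = rq->min_vruntime;
	} while (val != copy);

	return val;
}

int main(void)
{
	struct rq_sketch rq = { .min_vruntime = 0 };

	/* Same initial value init_cfs_rq() uses in the diff above. */
	store_sketch(&rq, (uint64_t)(-(1LL << 20)));
	printf("min_vruntime = %llu\n", (unsigned long long)load_sketch(&rq));
	return 0;
}

Per the commit message, after e8f331bcc270 no path reads min_vruntime this
way any more, so both the mirror field and the u64_u32_store() call sites
shown in the diff can be dropped in favour of plain assignments.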