From: K Prateek Nayak Date: Thu, 12 Mar 2026 04:44:28 +0000 (+0000) Subject: sched/topology: Allocate per-CPU sched_domain_shared in s_data X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1cc8a33ca7e8d38f962b64ece2a42c411a67bc76;p=thirdparty%2Fkernel%2Flinux.git sched/topology: Allocate per-CPU sched_domain_shared in s_data The "sched_domain_shared" objects are allocated for every topology level in __sdt_alloc() and are freed post sched domain rebuild if they aren't assigned during sd_init(). "sd->shared" is only assigned for SD_SHARE_LLC domains and out of all the assigned objects, only "sd_llc_shared" is ever used by the scheduler. Since only "sd_llc_shared" is ever used, and since SD_SHARE_LLC domains never overlap, allocate only a single range of per-CPU "sched_domain_shared" objects with s_data instead of doing it per topology level. The subsequent commit uses the degeneration path to correctly assign the "sd->shared" to the topmost SD_SHARE_LLC domain. No functional changes are expected at this point. 
Signed-off-by: K Prateek Nayak Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Reviewed-by: Chen Yu Reviewed-by: Dietmar Eggemann Tested-by: Dietmar Eggemann Link: https://patch.msgid.link/20260312044434.1974-4-kprateek.nayak@amd.com --- diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 6303790a41437..9006586720bf0 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -782,6 +782,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) } struct s_data { + struct sched_domain_shared * __percpu *sds; struct sched_domain * __percpu *sd; struct root_domain *rd; }; @@ -789,6 +790,7 @@ struct s_data { enum s_alloc { sa_rootdomain, sa_sd, + sa_sd_shared, sa_sd_storage, sa_none, }; @@ -1535,6 +1537,9 @@ static void set_domain_attribute(struct sched_domain *sd, static void __sdt_free(const struct cpumask *cpu_map); static int __sdt_alloc(const struct cpumask *cpu_map); +static void __sds_free(struct s_data *d, const struct cpumask *cpu_map); +static int __sds_alloc(struct s_data *d, const struct cpumask *cpu_map); + static void __free_domain_allocs(struct s_data *d, enum s_alloc what, const struct cpumask *cpu_map) { @@ -1546,6 +1551,9 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, case sa_sd: free_percpu(d->sd); fallthrough; + case sa_sd_shared: + __sds_free(d, cpu_map); + fallthrough; case sa_sd_storage: __sdt_free(cpu_map); fallthrough; @@ -1561,9 +1569,11 @@ __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) if (__sdt_alloc(cpu_map)) return sa_sd_storage; + if (__sds_alloc(d, cpu_map)) + return sa_sd_shared; d->sd = alloc_percpu(struct sched_domain *); if (!d->sd) - return sa_sd_storage; + return sa_sd_shared; d->rd = alloc_rootdomain(); if (!d->rd) return sa_sd; @@ -2464,6 +2474,42 @@ static void __sdt_free(const struct cpumask *cpu_map) } } +static int __sds_alloc(struct s_data *d, const struct cpumask *cpu_map) +{ + int j; + + d->sds 
= alloc_percpu(struct sched_domain_shared *); + if (!d->sds) + return -ENOMEM; + + for_each_cpu(j, cpu_map) { + struct sched_domain_shared *sds; + + sds = kzalloc_node(sizeof(struct sched_domain_shared), + GFP_KERNEL, cpu_to_node(j)); + if (!sds) + return -ENOMEM; + + *per_cpu_ptr(d->sds, j) = sds; + } + + return 0; +} + +static void __sds_free(struct s_data *d, const struct cpumask *cpu_map) +{ + int j; + + if (!d->sds) + return; + + for_each_cpu(j, cpu_map) + kfree(*per_cpu_ptr(d->sds, j)); + + free_percpu(d->sds); + d->sds = NULL; +} + static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *child, int cpu)