sched/topology: Rename SD_SHARE_PKG_RESOURCES to SD_SHARE_LLC
author    Alex Shi <alexs@kernel.org>
          Sat, 10 Feb 2024 11:39:23 +0000 (19:39 +0800)
committer Ingo Molnar <mingo@kernel.org>
          Wed, 28 Feb 2024 14:43:17 +0000 (15:43 +0100)
SD_SHARE_PKG_RESOURCES is a bit of a misnomer: its naming suggests that
it's sharing all 'package resources' - while in reality it's specifically
for sharing the LLC only.

Rename it to SD_SHARE_LLC to reduce confusion.

[ mingo: Rewrote the confusing changelog as well. ]

Suggested-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Link: https://lore.kernel.org/r/20240210113924.1130448-5-alexs@kernel.org
arch/powerpc/kernel/smp.c
include/linux/sched/sd_flags.h
include/linux/sched/topology.h
kernel/sched/fair.c
kernel/sched/topology.c

arch/powerpc/kernel/smp.c
index 693334c20d07db70e46224333c565abc457be11a..a60e4139214be58384edca3e282f8df936b8c4ea 100644 (file)
@@ -984,7 +984,7 @@ static bool shared_caches __ro_after_init;
 /* cpumask of CPUs with asymmetric SMT dependency */
 static int powerpc_smt_flags(void)
 {
-       int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+       int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
 
        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
@@ -1010,9 +1010,9 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
 static int powerpc_shared_cache_flags(void)
 {
        if (static_branch_unlikely(&splpar_asym_pack))
-               return SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING;
+               return SD_SHARE_LLC | SD_ASYM_PACKING;
 
-       return SD_SHARE_PKG_RESOURCES;
+       return SD_SHARE_LLC;
 }
 
 static int powerpc_shared_proc_flags(void)
include/linux/sched/sd_flags.h
index a8b28647aafc812d011839852c78ce81aee09ee2..b04a5d04dee901e58d26f7454ba700ded9c8d46a 100644 (file)
@@ -117,13 +117,13 @@ SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
 
 /*
- * Domain members share CPU package resources (i.e. caches)
+ * Domain members share CPU Last Level Caches
  *
  * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
  *               the same cache(s).
  * NEEDS_GROUPS: Caches are shared between groups.
  */
-SD_FLAG(SD_SHARE_PKG_RESOURCES, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+SD_FLAG(SD_SHARE_LLC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 
 /*
  * Only a single load balancing instance
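For context: sd_flags.h is an x-macro table, and each SD_FLAG() entry above is expanded twice in include/linux/sched/topology.h, first into an index and then into a bit value, so after this commit SD_SHARE_LLC becomes an ordinary SD_* bit. A rough sketch of that expansion (paraphrased; not part of this diff):

/* First expansion: generate one index per flag. */
#define SD_FLAG(name, mflags) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG

/* Second expansion: turn each index into a bit value, so
 * SD_SHARE_LLC ends up as 1 << __SD_SHARE_LLC. */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG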
include/linux/sched/topology.h
index a6e04b4a21d70f9d3c807970815d1b79fee74ed4..191b122158fbf65d4164eea21fa45d49db792e0d 100644 (file)
@@ -38,21 +38,21 @@ extern const struct sd_flag_debug sd_flag_debug[];
 #ifdef CONFIG_SCHED_SMT
 static inline int cpu_smt_flags(void)
 {
-       return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+       return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
 }
 #endif
 
 #ifdef CONFIG_SCHED_CLUSTER
 static inline int cpu_cluster_flags(void)
 {
-       return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
+       return SD_CLUSTER | SD_SHARE_LLC;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
 static inline int cpu_core_flags(void)
 {
-       return SD_SHARE_PKG_RESOURCES;
+       return SD_SHARE_LLC;
 }
 #endif
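
These per-level helpers are consumed through the default topology table in kernel/sched/topology.c, where each level pairs a cpumask with its flags callback; sd_init() then copies the callback's flags into the resulting sched_domain, which is how SD_SHARE_LLC lands on the SMT, CLS and MC levels. A condensed sketch of that table (paraphrased, level names as of this kernel; not part of this diff):

static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_CLUSTER
	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
	{ NULL, },
};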
 
kernel/sched/fair.c
index 39781a666c087efe5c0baf168280ee0000610d86..6a16129f9a5c0a90655bb37277c52f4dc1137721 100644 (file)
@@ -10678,7 +10678,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         */
        if (local->group_type == group_has_spare) {
                if ((busiest->group_type > group_fully_busy) &&
-                   !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
+                   !(env->sd->flags & SD_SHARE_LLC)) {
                        /*
                         * If busiest is overloaded, try to fill spare
                         * capacity. This might end up creating spare capacity
kernel/sched/topology.c
index 0b33f7b05d21d62d3415fdd994a0f3a1de677183..99ea5986038ce44997627fee1e01f6e36bef1b26 100644 (file)
@@ -657,13 +657,13 @@ static void destroy_sched_domains(struct sched_domain *sd)
 }
 
 /*
- * Keep a special pointer to the highest sched_domain that has
- * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
- * allows us to avoid some pointer chasing select_idle_sibling().
+ * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set
+ * (Last Level Cache Domain), as this allows us to avoid some pointer chasing
+ * in select_idle_sibling().
  *
- * Also keep a unique ID per domain (we use the first CPU number in
- * the cpumask of the domain), this allows us to quickly tell if
- * two CPUs are in the same cache domain, see cpus_share_cache().
+ * Also keep a unique ID per domain (we use the first CPU number in the cpumask
+ * of the domain); this allows us to quickly tell if two CPUs are in the same
+ * cache domain, see cpus_share_cache().
  */
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
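
The cpus_share_cache() referenced above is exactly the quick per-CPU ID comparison the comment promises; paraphrased from kernel/sched/core.c:

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	if (this_cpu == that_cpu)
		return true;

	/* Equal sd_llc_id means both CPUs sit under the same
	 * SD_SHARE_LLC domain, i.e. they share a last-level cache. */
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}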
@@ -684,7 +684,7 @@ static void update_top_cache_domain(int cpu)
        int id = cpu;
        int size = 1;
 
-       sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
+       sd = highest_flag_domain(cpu, SD_SHARE_LLC);
        if (sd) {
                id = cpumask_first(sched_domain_span(sd));
                size = cpumask_weight(sched_domain_span(sd));
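
highest_flag_domain() walks the hierarchy bottom-up and returns the topmost level that still carries the flag; for SD_SHARE_LLC that is by construction the LLC domain. A simplified sketch (the current helper in kernel/sched/sched.h additionally consults the SDF_SHARED_CHILD metadata to decide when the walk may stop early):

static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		/* SD_SHARE_LLC is SDF_SHARED_CHILD: once a level lacks
		 * it, no parent can have it, so the walk can stop. */
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}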
@@ -1554,7 +1554,7 @@ static struct cpumask             ***sched_domains_numa_masks;
  * function. For details, see include/linux/sched/sd_flags.h.
  *
  *   SD_SHARE_CPUCAPACITY
- *   SD_SHARE_PKG_RESOURCES
+ *   SD_SHARE_LLC
  *   SD_CLUSTER
  *   SD_NUMA
  *
@@ -1566,7 +1566,7 @@ static struct cpumask             ***sched_domains_numa_masks;
 #define TOPOLOGY_SD_FLAGS              \
        (SD_SHARE_CPUCAPACITY   |       \
         SD_CLUSTER             |       \
-        SD_SHARE_PKG_RESOURCES |       \
+        SD_SHARE_LLC           |       \
         SD_NUMA                |       \
         SD_ASYM_PACKING)
 
@@ -1609,7 +1609,7 @@ sd_init(struct sched_domain_topology_level *tl,
                                        | 0*SD_BALANCE_WAKE
                                        | 1*SD_WAKE_AFFINE
                                        | 0*SD_SHARE_CPUCAPACITY
-                                       | 0*SD_SHARE_PKG_RESOURCES
+                                       | 0*SD_SHARE_LLC
                                        | 0*SD_SERIALIZE
                                        | 1*SD_PREFER_SIBLING
                                        | 0*SD_NUMA
@@ -1646,7 +1646,7 @@ sd_init(struct sched_domain_topology_level *tl,
        if (sd->flags & SD_SHARE_CPUCAPACITY) {
                sd->imbalance_pct = 110;
 
-       } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+       } else if (sd->flags & SD_SHARE_LLC) {
                sd->imbalance_pct = 117;
                sd->cache_nice_tries = 1;
 
@@ -1671,7 +1671,7 @@ sd_init(struct sched_domain_topology_level *tl,
         * For all levels sharing cache; connect a sched_domain_shared
         * instance.
         */
-       if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+       if (sd->flags & SD_SHARE_LLC) {
                sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
                atomic_inc(&sd->shared->ref);
                atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
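
The sched_domain_shared instance connected here is the per-LLC shared state used by, among others, the idle-core search; its layout, paraphrased from include/linux/sched/topology.h around this kernel version (fields vary between releases):

struct sched_domain_shared {
	atomic_t	ref;		/* lifetime refcount, see atomic_inc() above */
	atomic_t	nr_busy_cpus;	/* busy CPUs in this LLC domain */
	int		has_idle_cores;	/* hint for select_idle_core() */
	int		nr_idle_scan;	/* SIS scan-depth control */
};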
@@ -2446,8 +2446,8 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
                        struct sched_domain *child = sd->child;
 
-                       if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
-                           (child->flags & SD_SHARE_PKG_RESOURCES)) {
+                       if (!(sd->flags & SD_SHARE_LLC) && child &&
+                           (child->flags & SD_SHARE_LLC)) {
                                struct sched_domain __rcu *top_p;
                                unsigned int nr_llcs;