sched/topology: Remove sched_domain_topology_level::flags
author K Prateek Nayak <kprateek.nayak@amd.com>
Fri, 11 Jul 2025 05:50:30 +0000 (11:20 +0530)
committer Peter Zijlstra <peterz@infradead.org>
Mon, 14 Jul 2025 08:59:35 +0000 (10:59 +0200)
Support for overlapping domains added in commit e3589f6c81e4 ("sched:
Allow for overlapping sched_domain spans") also allowed forcefully
setting SD_OVERLAP for !NUMA domains via FORCE_SD_OVERLAP sched_feat().

Since NUMA domains had to be presumed overlapping to ensure correct
behavior, "sched_domain_topology_level::flags" was introduced. NUMA
domains set the SDTL_OVERLAP flag, which ensured SD_OVERLAP was always
added during build_sched_domains() for these domains, even when
FORCE_SD_OVERLAP was off.
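
For reference, the NUMA topology levels picked this flag up when they
were instantiated; a simplified sketch of the loop in sched_init_numa()
prior to this change (see the corresponding removal further down):

    for (j = 1; j < nr_levels; i++, j++) {
            tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
            tl[i].numa_level = j;
            tl[i].flags = SDTL_OVERLAP;     /* dropped by this patch */
    }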

The condition for adding the SD_OVERLAP flag, as introduced by the
aforementioned commit, was as follows:

    if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
            sd->flags |= SD_OVERLAP;

The FORCE_SD_OVERLAP debug feature was removed in commit af85596c74de
("sched/topology: Remove FORCE_SD_OVERLAP"), which left the NUMA domains
as the exclusive users of the SDTL_OVERLAP, SD_OVERLAP, and SD_NUMA flags.

Get rid of SDTL_OVERLAP and SD_OVERLAP, as they have become redundant,
and instead rely on SD_NUMA to detect the only type of overlapping
domain currently supported. Since SDTL_OVERLAP was the only user of
"tl->flags", get rid of "sched_domain_topology_level::flags" too.

Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/ba4dbdf8-bc37-493d-b2e0-2efb00ea3e19@amd.com
include/linux/sched/sd_flags.h
include/linux/sched/topology.h
kernel/sched/fair.c
kernel/sched/topology.c

diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index b04a5d04dee901e58d26f7454ba700ded9c8d46a..42839cfa27788008208088ae26a7c9cd19c9494d 100644
@@ -153,14 +153,6 @@ SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS)
  */
 SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
 
-/*
- * sched_groups of this level overlap
- *
- * SHARED_PARENT: Set for all NUMA levels above NODE.
- * NEEDS_GROUPS: Overlaps can only exist with more than one group.
- */
-SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
-
 /*
  * Cross-node balancing
  *
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 0d5daaa277b755041e8e24dc59e0fe8d1f5e4544..5263746b63e8c3ca9aab40e91ff3eb13de3adc2d 100644
@@ -175,8 +175,6 @@ bool cpus_share_resources(int this_cpu, int that_cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 typedef int (*sched_domain_flags_f)(void);
 
-#define SDTL_OVERLAP   0x01
-
 struct sd_data {
        struct sched_domain *__percpu *sd;
        struct sched_domain_shared *__percpu *sds;
@@ -187,7 +185,6 @@ struct sd_data {
 struct sched_domain_topology_level {
        sched_domain_mask_f mask;
        sched_domain_flags_f sd_flags;
-       int                 flags;
        int                 numa_level;
        struct sd_data      data;
        char                *name;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 20a845697c1dc7c4b8f2ece975cdd4c77ab389b6..b9b4bbbf0af6fd1fe3a9ca8407aa0e9f5302e1da 100644
@@ -9926,9 +9926,9 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
        min_capacity = ULONG_MAX;
        max_capacity = 0;
 
-       if (child->flags & SD_OVERLAP) {
+       if (child->flags & SD_NUMA) {
                /*
-                * SD_OVERLAP domains cannot assume that child groups
+                * SD_NUMA domains cannot assume that child groups
                 * span the current group.
                 */
 
@@ -9941,7 +9941,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                }
        } else  {
                /*
-                * !SD_OVERLAP domains can assume that child groups
+                * !SD_NUMA domains can assume that child groups
                 * span the current group.
                 */
 
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d01f5a49f2e7a7cdab9167b3d7c83492be770487..977e133bb8a44340122b55b9af33038347e687b5 100644
@@ -89,7 +89,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!(sd->flags & SD_OVERLAP) &&
+               if (!(sd->flags & SD_NUMA) &&
                    cpumask_intersects(groupmask, sched_group_span(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: repeated CPUs\n");
@@ -102,7 +102,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                group->sgc->id,
                                cpumask_pr_args(sched_group_span(group)));
 
-               if ((sd->flags & SD_OVERLAP) &&
+               if ((sd->flags & SD_NUMA) &&
                    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
                        printk(KERN_CONT " mask=%*pbl",
                                cpumask_pr_args(group_balance_mask(group)));
@@ -1344,7 +1344,7 @@ void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
                 * "sg->asym_prefer_cpu" to "sg->sgc->asym_prefer_cpu"
                 * which is shared by all the overlapping groups.
                 */
-               WARN_ON_ONCE(sd->flags & SD_OVERLAP);
+               WARN_ON_ONCE(sd->flags & SD_NUMA);
 
                sg = sd->groups;
                if (cpu != sg->asym_prefer_cpu) {
@@ -2016,7 +2016,6 @@ void sched_init_numa(int offline_node)
        for (j = 1; j < nr_levels; i++, j++) {
                tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
                tl[i].numa_level = j;
-               tl[i].flags = SDTL_OVERLAP;
        }
 
        sched_domain_topology_saved = sched_domain_topology;
@@ -2327,7 +2326,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 
                        if (sdd->sd) {
                                sd = *per_cpu_ptr(sdd->sd, j);
-                               if (sd && (sd->flags & SD_OVERLAP))
+                               if (sd && (sd->flags & SD_NUMA))
                                        free_sched_groups(sd->groups, 0);
                                kfree(*per_cpu_ptr(sdd->sd, j));
                        }
@@ -2393,9 +2392,13 @@ static bool topology_span_sane(const struct cpumask *cpu_map)
        id_seen = sched_domains_tmpmask2;
 
        for_each_sd_topology(tl) {
+               int tl_common_flags = 0;
+
+               if (tl->sd_flags)
+                       tl_common_flags = (*tl->sd_flags)();
 
                /* NUMA levels are allowed to overlap */
-               if (tl->flags & SDTL_OVERLAP)
+               if (tl_common_flags & SD_NUMA)
                        continue;
 
                cpumask_clear(covered);
@@ -2466,8 +2469,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 
                        if (tl == sched_domain_topology)
                                *per_cpu_ptr(d.sd, i) = sd;
-                       if (tl->flags & SDTL_OVERLAP)
-                               sd->flags |= SD_OVERLAP;
                        if (cpumask_equal(cpu_map, sched_domain_span(sd)))
                                break;
                }
@@ -2480,7 +2481,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        for_each_cpu(i, cpu_map) {
                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
                        sd->span_weight = cpumask_weight(sched_domain_span(sd));
-                       if (sd->flags & SD_OVERLAP) {
+                       if (sd->flags & SD_NUMA) {
                                if (build_overlap_sched_groups(sd, i))
                                        goto error;
                        } else {