sched/topology: Rename sched_group_cpus()
author     Peter Zijlstra <peterz@infradead.org>
           Mon, 1 May 2017 09:03:12 +0000 (11:03 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 15 May 2017 08:15:34 +0000 (10:15 +0200)
There's a discrepancy in naming between the sched_domain and
sched_group cpumask accessors: sched_domain_span() vs.
sched_group_cpus(). Since we're making changes here anyway, fix it.

  $ git grep sched_group_cpus | wc -l
  28
  $ git grep sched_domain_span | wc -l
  38

Suggests changing sched_group_cpus() into sched_group_span():

  for i in `git grep -l sched_group_cpus`
  do
    sed -i -e 's/sched_group_cpus/sched_group_span/g' "$i"
  done
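
For context, the rename makes the group accessor mirror the existing
domain accessor. A minimal sketch of the two inline helpers as they
look after this patch (simplified; the struct layouts and the
to_cpumask() helper come from the kernel's scheduler and cpumask
headers):

  /* Existing accessor: the CPUs spanned by a scheduling domain
   * (include/linux/sched/topology.h). */
  static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
  {
          return to_cpumask(sd->span);
  }

  /* Renamed accessor: the CPUs spanned by a scheduling group,
   * formerly sched_group_cpus() (kernel/sched/sched.h). */
  static inline struct cpumask *sched_group_span(struct sched_group *sg)
  {
          return to_cpumask(sg->cpumask);
  }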

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/topology.c

kernel/sched/fair.c
index a7d84c8a7881f5039c43b133e8c680e363c8b7a7..eede181b4530cb74cca4c66b566e27d205673940 100644
@@ -5484,12 +5484,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                int i;
 
                /* Skip over this group if it has no CPUs allowed */
-               if (!cpumask_intersects(sched_group_cpus(group),
+               if (!cpumask_intersects(sched_group_span(group),
                                        &p->cpus_allowed))
                        continue;
 
                local_group = cpumask_test_cpu(this_cpu,
-                                              sched_group_cpus(group));
+                                              sched_group_span(group));
 
                /*
                 * Tally up the load of all CPUs in the group and find
@@ -5499,7 +5499,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                runnable_load = 0;
                max_spare_cap = 0;
 
-               for_each_cpu(i, sched_group_cpus(group)) {
+               for_each_cpu(i, sched_group_span(group)) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = source_load(i, load_idx);
@@ -5602,10 +5602,10 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 
        /* Check if we have any choice: */
        if (group->group_weight == 1)
-               return cpumask_first(sched_group_cpus(group));
+               return cpumask_first(sched_group_span(group));
 
        /* Traverse only the allowed CPUs */
-       for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
+       for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
                if (idle_cpu(i)) {
                        struct rq *rq = cpu_rq(i);
                        struct cpuidle_state *idle = idle_get_state(rq);
@@ -7192,7 +7192,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                 * span the current group.
                 */
 
-               for_each_cpu(cpu, sched_group_cpus(sdg)) {
+               for_each_cpu(cpu, sched_group_span(sdg)) {
                        struct sched_group_capacity *sgc;
                        struct rq *rq = cpu_rq(cpu);
 
@@ -7371,7 +7371,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
        memset(sgs, 0, sizeof(*sgs));
 
-       for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+       for_each_cpu_and(i, sched_group_span(group), env->cpus) {
                struct rq *rq = cpu_rq(i);
 
                /* Bias balancing toward cpus of our domain */
@@ -7535,7 +7535,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                struct sg_lb_stats *sgs = &tmp_sgs;
                int local_group;
 
-               local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
+               local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
                if (local_group) {
                        sds->local = sg;
                        sgs = local;
@@ -7890,7 +7890,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
        unsigned long busiest_load = 0, busiest_capacity = 1;
        int i;
 
-       for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+       for_each_cpu_and(i, sched_group_span(group), env->cpus) {
                unsigned long capacity, wl;
                enum fbq_type rt;
 
@@ -8043,7 +8043,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                .sd             = sd,
                .dst_cpu        = this_cpu,
                .dst_rq         = this_rq,
-               .dst_grpmask    = sched_group_cpus(sd->groups),
+               .dst_grpmask    = sched_group_span(sd->groups),
                .idle           = idle,
                .loop_break     = sched_nr_migrate_break,
                .cpus           = cpus,
kernel/sched/sched.h
index f7c70575ae34b3c70983d077530b082e6a7874a2..f8cf1d87f0659d50e70409a36d370b233ab9f0fd 100644
@@ -1048,7 +1048,7 @@ struct sched_group {
        unsigned long cpumask[0];
 };
 
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+static inline struct cpumask *sched_group_span(struct sched_group *sg)
 {
        return to_cpumask(sg->cpumask);
 }
@@ -1067,7 +1067,7 @@ static inline struct cpumask *group_balance_mask(struct sched_group *sg)
  */
 static inline unsigned int group_first_cpu(struct sched_group *group)
 {
-       return cpumask_first(sched_group_cpus(group));
+       return cpumask_first(sched_group_span(group));
 }
 
 extern int group_balance_cpu(struct sched_group *sg);
kernel/sched/topology.c
index 070191f02035e863bb07b691023185ef1ce2ba5b..79895aec281eb5ad198fae3e5e8aed31849ed900 100644
@@ -53,7 +53,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                printk(KERN_ERR "ERROR: domain->span does not contain "
                                "CPU%d\n", cpu);
        }
-       if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
+       if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
                printk(KERN_ERR "ERROR: domain->groups does not contain"
                                " CPU%d\n", cpu);
        }
@@ -66,27 +66,27 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!cpumask_weight(sched_group_cpus(group))) {
+               if (!cpumask_weight(sched_group_span(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: empty group\n");
                        break;
                }
 
                if (!(sd->flags & SD_OVERLAP) &&
-                   cpumask_intersects(groupmask, sched_group_cpus(group))) {
+                   cpumask_intersects(groupmask, sched_group_span(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: repeated CPUs\n");
                        break;
                }
 
-               cpumask_or(groupmask, groupmask, sched_group_cpus(group));
+               cpumask_or(groupmask, groupmask, sched_group_span(group));
 
                printk(KERN_CONT " %d:{ span=%*pbl",
                                group->sgc->id,
-                               cpumask_pr_args(sched_group_cpus(group)));
+                               cpumask_pr_args(sched_group_span(group)));
 
                if ((sd->flags & SD_OVERLAP) &&
-                   !cpumask_equal(group_balance_mask(group), sched_group_cpus(group))) {
+                   !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
                        printk(KERN_CONT " mask=%*pbl",
                                cpumask_pr_args(group_balance_mask(group)));
                }
@@ -96,7 +96,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
                if (group == sd->groups && sd->child &&
                    !cpumask_equal(sched_domain_span(sd->child),
-                                  sched_group_cpus(group))) {
+                                  sched_group_span(group))) {
                        printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
                }
 
@@ -618,7 +618,7 @@ int group_balance_cpu(struct sched_group *sg)
 static void
 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
 {
-       const struct cpumask *sg_span = sched_group_cpus(sg);
+       const struct cpumask *sg_span = sched_group_span(sg);
        struct sd_data *sdd = sd->private;
        struct sched_domain *sibling;
        int i;
@@ -664,7 +664,7 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
        if (!sg)
                return NULL;
 
-       sg_span = sched_group_cpus(sg);
+       sg_span = sched_group_span(sg);
        if (sd->child)
                cpumask_copy(sg_span, sched_domain_span(sd->child));
        else
@@ -682,7 +682,7 @@ static void init_overlap_sched_group(struct sched_domain *sd,
        int cpu;
 
        build_balance_mask(sd, sg, mask);
-       cpu = cpumask_first_and(sched_group_cpus(sg), mask);
+       cpu = cpumask_first_and(sched_group_span(sg), mask);
 
        sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
        if (atomic_inc_return(&sg->sgc->ref) == 1)
@@ -695,7 +695,7 @@ static void init_overlap_sched_group(struct sched_domain *sd,
         * domains and no possible iteration will get us here, we won't
         * die on a /0 trap.
         */
-       sg_span = sched_group_cpus(sg);
+       sg_span = sched_group_span(sg);
        sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 }
@@ -737,7 +737,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                if (!sg)
                        goto fail;
 
-               sg_span = sched_group_cpus(sg);
+               sg_span = sched_group_span(sg);
                cpumask_or(covered, covered, sg_span);
 
                init_overlap_sched_group(sd, sg);
@@ -848,14 +848,14 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
        atomic_inc(&sg->sgc->ref);
 
        if (child) {
-               cpumask_copy(sched_group_cpus(sg), sched_domain_span(child));
-               cpumask_copy(group_balance_mask(sg), sched_group_cpus(sg));
+               cpumask_copy(sched_group_span(sg), sched_domain_span(child));
+               cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
        } else {
-               cpumask_set_cpu(cpu, sched_group_cpus(sg));
+               cpumask_set_cpu(cpu, sched_group_span(sg));
                cpumask_set_cpu(cpu, group_balance_mask(sg));
        }
 
-       sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_cpus(sg));
+       sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 
        return sg;
@@ -890,7 +890,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
                sg = get_group(i, sdd);
 
-               cpumask_or(covered, covered, sched_group_cpus(sg));
+               cpumask_or(covered, covered, sched_group_span(sg));
 
                if (!first)
                        first = sg;
@@ -923,12 +923,12 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
        do {
                int cpu, max_cpu = -1;
 
-               sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+               sg->group_weight = cpumask_weight(sched_group_span(sg));
 
                if (!(sd->flags & SD_ASYM_PACKING))
                        goto next;
 
-               for_each_cpu(cpu, sched_group_cpus(sg)) {
+               for_each_cpu(cpu, sched_group_span(sg)) {
                        if (max_cpu < 0)
                                max_cpu = cpu;
                        else if (sched_asym_prefer(cpu, max_cpu))