sched: Pre-compute cpumask_weight(sched_domain_span(sd))
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
           Fri, 16 Apr 2010 12:59:29 +0000 (14:59 +0200)
committer  Paul Gortmaker <paul.gortmaker@windriver.com>
           Thu, 6 Jan 2011 23:07:52 +0000 (18:07 -0500)
commit 669c55e9f99b90e46eaa0f98a67ec53d46dc969a upstream.

Dave reported that his large SPARC machines spend lots of time in
hweight64(), so try to optimize away some of those needless
cpumask_weight() invocations (especially with large offstack cpumasks,
these are very expensive indeed).
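
The win is the classic compute-once/cache pattern: cpumask_weight() is
bitmap_weight() over every word of the mask, i.e. one hweight64() per 64
CPUs, while a domain's span only changes when the domain hierarchy is
(re)built. A minimal userspace sketch of the pattern follows; the struct
domain, mask_weight(), attach_domain() and domain_weight() names are
hypothetical stand-ins for the kernel's sched_domain and cpumask API,
not kernel code:

	#include <stdint.h>

	#define MASK_WORDS 4	/* enough bits for 256 CPUs */

	struct domain {
		struct domain *parent;
		uint64_t span[MASK_WORDS];	/* CPUs this domain covers */
		unsigned int span_weight;	/* cached popcount of span */
	};

	/* One popcount per word -- this is the cost being avoided. */
	static unsigned int mask_weight(const uint64_t *m)
	{
		unsigned int w = 0;

		for (int i = 0; i < MASK_WORDS; i++)
			w += (unsigned int)__builtin_popcountll(m[i]);
		return w;
	}

	/* Slow path: walk the chain once whenever the hierarchy changes. */
	static void attach_domain(struct domain *d)
	{
		for (struct domain *tmp = d; tmp; tmp = tmp->parent)
			tmp->span_weight = mask_weight(tmp->span);
	}

	/* Hot path (wakeup/balance): a plain load, no popcount loop. */
	static inline unsigned int domain_weight(const struct domain *d)
	{
		return d->span_weight;
	}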

Reported-by: David Miller <davem@davemloft.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
include/linux/sched.h
kernel/sched.c
kernel/sched_fair.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25f966922954a60ceff4fb96584662112eb9aef0..89e0fee6b57adfc8ccb6942150a843b0e4ba64e3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -954,6 +954,7 @@ struct sched_domain {
        char *name;
 #endif
 
+       unsigned int span_weight;
        /*
         * Span of all CPUs in this domain.
         *
diff --git a/kernel/sched.c b/kernel/sched.c
index 074c4d8ec1d6a42d6794a8b00ba671f8e2f442f9..b741de5a8f74863e20ff6881cccf279724cba500 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6296,6 +6296,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        struct rq *rq = cpu_rq(cpu);
        struct sched_domain *tmp;
 
+       for (tmp = sd; tmp; tmp = tmp->parent)
+               tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
+
        /* Remove the sched domains which do not contribute to scheduling. */
        for (tmp = sd; tmp; ) {
                struct sched_domain *parent = tmp->parent;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ae4d842c0951b38f8b07d5c20a10bb2a5c2678fd..a29df8699cfc1e71169aa0895d208311128914f6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1542,9 +1542,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                 * Pick the largest domain to update shares over
                 */
                tmp = sd;
-               if (affine_sd && (!tmp ||
-                                 cpumask_weight(sched_domain_span(affine_sd)) >
-                                 cpumask_weight(sched_domain_span(sd))))
+               if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
                        tmp = affine_sd;
 
                if (tmp) {
@@ -1588,10 +1586,10 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 
                /* Now try balancing at a lower domain level of new_cpu */
                cpu = new_cpu;
-               weight = cpumask_weight(sched_domain_span(sd));
+               weight = sd->span_weight;
                sd = NULL;
                for_each_domain(cpu, tmp) {
-                       if (weight <= cpumask_weight(sched_domain_span(tmp)))
+                       if (weight <= tmp->span_weight)
                                break;
                        if (tmp->flags & sd_flag)
                                sd = tmp;
@@ -2323,7 +2321,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
 
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = cpumask_weight(sched_domain_span(sd));
+       unsigned long weight = sd->span_weight;
        unsigned long smt_gain = sd->smt_gain;
 
        smt_gain /= weight;
@@ -2356,7 +2354,7 @@ unsigned long scale_rt_power(int cpu)
 
 static void update_cpu_power(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = cpumask_weight(sched_domain_span(sd));
+       unsigned long weight = sd->span_weight;
        unsigned long power = SCHED_LOAD_SCALE;
        struct sched_group *sdg = sd->groups;
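
Note the ordering in cpu_attach_domain() above: the span_weight
pre-compute loop walks the whole parent chain before the degeneration
pass that prunes useless domains, and degeneration never alters a
surviving domain's span, so all four converted read sites (two in
select_task_rq_fair(), plus default_scale_smt_power() and
update_cpu_power()) only ever observe a weight computed for the current
hierarchy.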