Move xen patchset to new version's subdir.
diff --git a/src/patches/suse-2.6.27.31/patches.arch/x86_sgi_cpus4096-06-optimize-cpumask-in-sched_c.patch b/src/patches/suse-2.6.27.31/patches.arch/x86_sgi_cpus4096-06-optimize-cpumask-in-sched_c.patch
new file mode 100644
index 0000000..c24f43e
--- /dev/null
+++ b/src/patches/suse-2.6.27.31/patches.arch/x86_sgi_cpus4096-06-optimize-cpumask-in-sched_c.patch
@@ -0,0 +1,49 @@
+From: Mike Travis <travis@sgi.com>
+Subject: Additional cpumask fixups
+References: bnc#425240 FATE304266
+Patch-mainline: 2.6.28
+
+Signed-off-by: Thomas Renninger <trenn@suse.de>
+
+---
+ kernel/sched.c |   13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6075,8 +6075,9 @@ static void move_task_off_dead_cpu(int d
+ 
+       do {
+               /* On same node? */
+-              mask = node_to_cpumask(cpu_to_node(dead_cpu));
+-              cpus_and(mask, mask, p->cpus_allowed);
++              node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));
++
++              cpus_and(mask, *pnodemask, p->cpus_allowed);
+       dest_cpu = any_online_cpu(mask);
+ 
+               /* On any allowed CPU? */
+@@ -7086,9 +7087,9 @@ static int cpu_to_allnodes_group(int cpu
+                                struct sched_group **sg, cpumask_t *nodemask)
+ {
+       int group;
++      node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
+ 
+-      *nodemask = node_to_cpumask(cpu_to_node(cpu));
+-      cpus_and(*nodemask, *nodemask, *cpu_map);
++      cpus_and(*nodemask, *pnodemask, *cpu_map);
+       group = first_cpu(*nodemask);
+ 
+       if (sg)
+@@ -7138,9 +7139,9 @@ static void free_sched_groups(const cpum
+ 
+               for (i = 0; i < nr_node_ids; i++) {
+                       struct sched_group *oldsg, *sg = sched_group_nodes[i];
++      node_to_cpumask_ptr(pnodemask, i);
+ 
+-                      *nodemask = node_to_cpumask(i);
+-                      cpus_and(*nodemask, *nodemask, *cpu_map);
++                      cpus_and(*nodemask, *pnodemask, *cpu_map);
+                       if (cpus_empty(*nodemask))
+                               continue;
+ 
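For context: node_to_cpumask() yields a full cpumask_t value that the caller copies onto its stack, and with NR_CPUS=4096 each cpumask_t is 4096 bits, i.e. 512 bytes. node_to_cpumask_ptr() instead binds a pointer to the existing per-node mask, which is the stack saving this patch applies in kernel/sched.c. The minimal user-space sketch below illustrates the difference; the cpumask_t typedef, node_cpu_map table, and function names here are simplified stand-ins for illustration, not the kernel's actual definitions.

#include <stdio.h>

#define NR_CPUS 4096
#define MASK_LONGS (NR_CPUS / (8 * sizeof(unsigned long)))

/* Stand-in for the kernel's cpumask_t: 4096 bits, i.e. 512 bytes. */
typedef struct { unsigned long bits[MASK_LONGS]; } cpumask_t;

/* Illustrative per-node mask table; the kernel keeps the equivalent
 * data in its topology structures. */
static cpumask_t node_cpu_map[4];

/* Old pattern (node_to_cpumask-style): the mask is handed back by value,
 * so the caller materialises a 512-byte temporary on its stack. */
static cpumask_t node_mask_by_value(int node)
{
        return node_cpu_map[node];
}

/* New pattern (node_to_cpumask_ptr-style): hand out a pointer to the
 * existing mask, costing only pointer-sized stack space in the caller. */
static const cpumask_t *node_mask_by_ptr(int node)
{
        return &node_cpu_map[node];
}

int main(void)
{
        cpumask_t copy = node_mask_by_value(0);            /* 512-byte local */
        const cpumask_t *pnodemask = node_mask_by_ptr(0);  /* pointer-sized  */

        printf("by value: %zu bytes on the stack, by pointer: %zu bytes\n",
               sizeof(copy), sizeof(pnodemask));
        return 0;
}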