diff --git a/src/patches/suse-2.6.27.31/patches.arch/s390-07-03-topology-fix.diff b/src/patches/suse-2.6.27.31/patches.arch/s390-07-03-topology-fix.diff
new file mode 100644
index 0000000..c86c5e3
--- /dev/null
+++ b/src/patches/suse-2.6.27.31/patches.arch/s390-07-03-topology-fix.diff
@@ -0,0 +1,207 @@
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: kernel: fix cpu topology support
+References: bnc#464466
+
+Symptom:     CPU topology changes aren't recognized by the scheduler.
+Problem:     The common code scheduler used to have a hook which could be
+             called from architecture code to trigger a rebuild of all
+             scheduling domains when the cpu topology changed. This hook
+             was removed erroneously, so cpu topology change notifications
+             were lost.
+Solution:    Re-add the hook. This patch also removes some unused code
+             from the s390-specific cpu topology code.
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+ arch/s390/kernel/topology.c |   35 ++++++++++-------------------------
+ include/linux/topology.h    |    2 +-
+ kernel/sched.c              |   16 +++++++++++++---
+ 3 files changed, 24 insertions(+), 29 deletions(-)
+
+--- a/arch/s390/kernel/topology.c
++++ b/arch/s390/kernel/topology.c
+@@ -14,6 +14,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/cpu.h>
+ #include <linux/smp.h>
++#include <linux/cpuset.h>
+ #include <asm/delay.h>
+ #include <asm/s390_ext.h>
+ #include <asm/sysinfo.h>
+@@ -64,7 +65,6 @@ static void topology_work_fn(struct work
+ static struct tl_info *tl_info;
+ static struct core_info core_info;
+ static int machine_has_topology;
+-static int machine_has_topology_irq;
+ static struct timer_list topology_timer;
+ static void set_topology_timer(void);
+ static DECLARE_WORK(topology_work, topology_work_fn);
+@@ -81,7 +81,7 @@ cpumask_t cpu_coregroup_map(unsigned int
+ 
+       cpus_clear(mask);
+       if (!topology_enabled || !machine_has_topology)
+-              return cpu_present_map;
++              return cpu_possible_map;
+       spin_lock_irqsave(&topology_lock, flags);
+       while (core) {
+               if (cpu_isset(cpu, core->mask)) {
+@@ -171,7 +171,7 @@ static void topology_update_polarization
+       int cpu;
+ 
+       mutex_lock(&smp_cpu_state_mutex);
+-      for_each_present_cpu(cpu)
++      for_each_possible_cpu(cpu)
+               smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+       mutex_unlock(&smp_cpu_state_mutex);
+ }
+@@ -202,7 +202,7 @@ int topology_set_cpu_management(int fc)
+               rc = ptf(PTF_HORIZONTAL);
+       if (rc)
+               return -EBUSY;
+-      for_each_present_cpu(cpu)
++      for_each_possible_cpu(cpu)
+               smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+       return rc;
+ }
+@@ -211,11 +211,11 @@ static void update_cpu_core_map(void)
+ {
+       int cpu;
+ 
+-      for_each_present_cpu(cpu)
++      for_each_possible_cpu(cpu)
+               cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+ }
+ 
+-void arch_update_cpu_topology(void)
++int arch_update_cpu_topology(void)
+ {
+       struct tl_info *info = tl_info;
+       struct sys_device *sysdev;
+@@ -224,7 +224,7 @@ void arch_update_cpu_topology(void)
+       if (!machine_has_topology) {
+               update_cpu_core_map();
+               topology_update_polarization_simple();
+-              return;
++              return 0;
+       }
+       stsi(info, 15, 1, 2);
+       tl_to_cores(info);
+@@ -233,11 +233,12 @@ void arch_update_cpu_topology(void)
+               sysdev = get_cpu_sysdev(cpu);
+               kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+       }
++      return 1;
+ }
+ 
+ static void topology_work_fn(struct work_struct *work)
+ {
+-      arch_reinit_sched_domains();
++      rebuild_sched_domains();
+ }
+ 
+ void topology_schedule_update(void)
+@@ -260,11 +261,6 @@ static void set_topology_timer(void)
+       add_timer(&topology_timer);
+ }
+ 
+-static void topology_interrupt(__u16 code)
+-{
+-      schedule_work(&topology_work);
+-}
+-
+ static int __init early_parse_topology(char *p)
+ {
+       if (strncmp(p, "on", 2))
+@@ -284,14 +280,7 @@ static int __init init_topology_update(v
+               goto out;
+       }
+       init_timer_deferrable(&topology_timer);
+-      if (machine_has_topology_irq) {
+-              rc = register_external_interrupt(0x2005, topology_interrupt);
+-              if (rc)
+-                      goto out;
+-              ctl_set_bit(0, 8);
+-      }
+-      else
+-              set_topology_timer();
++      set_topology_timer();
+ out:
+       update_cpu_core_map();
+       return rc;
+@@ -312,9 +301,6 @@ void __init s390_init_cpu_topology(void)
+               return;
+       machine_has_topology = 1;
+ 
+-      if (facility_bits & (1ULL << 51))
+-              machine_has_topology_irq = 1;
+-
+       tl_info = alloc_bootmem_pages(PAGE_SIZE);
+       info = tl_info;
+       stsi(info, 15, 1, 2);
+@@ -338,5 +324,4 @@ void __init s390_init_cpu_topology(void)
+       return;
+ error:
+       machine_has_topology = 0;
+-      machine_has_topology_irq = 0;
+ }
+--- a/include/linux/topology.h
++++ b/include/linux/topology.h
+@@ -49,7 +49,7 @@
+       for_each_online_node(node)                      \
+               if (nr_cpus_node(node))
+ 
+-void arch_update_cpu_topology(void);
++int arch_update_cpu_topology(void);
+ 
+ /* Conform to ACPI 2.0 SLIT distance definitions */
+ #define LOCAL_DISTANCE                10
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -7640,8 +7640,14 @@ static struct sched_domain_attr *dattr_c
+  */
+ static cpumask_t fallback_doms;
+ 
+-void __attribute__((weak)) arch_update_cpu_topology(void)
++/*
++ * arch_update_cpu_topology lets virtualized architectures update the
++ * cpu core maps. It is supposed to return 1 if the topology changed
++ * or 0 if it stayed the same.
++ */
++int __attribute__((weak)) arch_update_cpu_topology(void)
+ {
++      return 0;
+ }
+ 
+ /*
+@@ -7735,17 +7741,21 @@ void partition_sched_domains(int ndoms_n
+                            struct sched_domain_attr *dattr_new)
+ {
+       int i, j, n;
++      int top_changed;
+ 
+       mutex_lock(&sched_domains_mutex);
+ 
+       /* always unregister in case we don't destroy any domains */
+       unregister_sched_domain_sysctl();
+ 
++      /* Let architecture update cpu core mappings. */
++      top_changed = arch_update_cpu_topology();
++
+       n = doms_new ? ndoms_new : 0;
+ 
+       /* Destroy deleted domains */
+       for (i = 0; i < ndoms_cur; i++) {
+-              for (j = 0; j < n; j++) {
++              for (j = 0; j < n && !top_changed; j++) {
+                       if (cpus_equal(doms_cur[i], doms_new[j])
+                           && dattrs_equal(dattr_cur, i, dattr_new, j))
+                               goto match1;
+@@ -7765,7 +7775,7 @@ match1:
+ 
+       /* Build new domains */
+       for (i = 0; i < ndoms_new; i++) {
+-              for (j = 0; j < ndoms_cur; j++) {
++              for (j = 0; j < ndoms_cur && !top_changed; j++) {
+                       if (cpus_equal(doms_new[i], doms_cur[j])
+                           && dattrs_equal(dattr_new, i, dattr_cur, j))
+                               goto match2;
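
The mechanism this patch restores is easy to demonstrate outside the kernel. Below is a minimal user-space sketch of its two ideas: a weak default hook that an architecture can override at link time, and a caller that skips its reuse checks when the hook reports a change. It is an illustration only; the demo_* names are invented here, and only __attribute__((weak)) and the "j < n && !top_changed" short-circuit are taken from the patch itself. Build with gcc; weak symbols are a GNU extension.

#include <stdio.h>

/*
 * The hook: generic code supplies a weak no-op default so it links on
 * architectures that never report topology changes.  An architecture
 * that does (s390 in this patch) provides a strong definition, which
 * the linker prefers, and returns 1 when its core maps were rebuilt.
 */
int __attribute__((weak)) demo_arch_update_topology(void)
{
	return 0;	/* default: topology unchanged */
}

/*
 * The consumer: mirrors the partition_sched_domains() change.  When
 * the hook reports a change, the match loop is skipped entirely, so
 * every current domain is treated as stale and rebuilt from the new
 * core maps instead of being reused.
 */
static void demo_partition_domains(const int *doms_cur, int ndoms_cur,
				   const int *doms_new, int ndoms_new)
{
	int top_changed = demo_arch_update_topology();
	int i, j;

	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < ndoms_new && !top_changed; j++)
			if (doms_cur[i] == doms_new[j])
				goto match;	/* unchanged: keep it */
		printf("destroying and rebuilding domain %d\n", doms_cur[i]);
match:
		;
	}
}

int main(void)
{
	int cur[] = { 0, 1 };
	int new[] = { 0, 1 };

	/* With the weak default nothing is rebuilt: both domains match. */
	demo_partition_domains(cur, 2, new, 2);
	return 0;
}

Linking in a second object file that strongly defines demo_arch_update_topology() to return 1 makes both domains get rebuilt even though the arrays are equal, which is exactly the behavior partition_sched_domains() needs after a topology change notification.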