]>
Commit | Line | Data |
---|---|---|
8f69975d BS |
1 | From: Gerald Schaefer <geraldsc@de.ibm.com> |
2 | Subject: kernel: fix cpu topology support | |
3 | References: bnc#464466 | |
4 | ||
5 | Symptom: CPU topology changes aren't recognized by the scheduler. | |
6 | Problem: The common code scheduler used to have a hook which could be | |
7 | called from architecture code to trigger a rebuild of all | |
8 | scheduling domains when cpu topology changed. This hook got | |
9 | removed erroneously. So cpu topology change notifications | |
10 | got lost. | |
11 | Solution: Re-add the hook. This patch also removes some unused code | |
12 | from the s390 specific cpu topology code. | |
13 | ||
14 | Acked-by: John Jolly <jjolly@suse.de> | |
15 | --- | |
16 | arch/s390/kernel/topology.c | 35 ++++++++++------------------------- | |
17 | include/linux/topology.h | 2 +- | |
18 | kernel/sched.c | 16 +++++++++++++--- | |
19 | 3 files changed, 24 insertions(+), 29 deletions(-) | |
20 | ||
21 | --- a/arch/s390/kernel/topology.c | |
22 | +++ b/arch/s390/kernel/topology.c | |
23 | @@ -14,6 +14,7 @@ | |
24 | #include <linux/workqueue.h> | |
25 | #include <linux/cpu.h> | |
26 | #include <linux/smp.h> | |
27 | +#include <linux/cpuset.h> | |
28 | #include <asm/delay.h> | |
29 | #include <asm/s390_ext.h> | |
30 | #include <asm/sysinfo.h> | |
31 | @@ -64,7 +65,6 @@ static void topology_work_fn(struct work | |
32 | static struct tl_info *tl_info; | |
33 | static struct core_info core_info; | |
34 | static int machine_has_topology; | |
35 | -static int machine_has_topology_irq; | |
36 | static struct timer_list topology_timer; | |
37 | static void set_topology_timer(void); | |
38 | static DECLARE_WORK(topology_work, topology_work_fn); | |
39 | @@ -81,7 +81,7 @@ cpumask_t cpu_coregroup_map(unsigned int | |
40 | ||
41 | cpus_clear(mask); | |
42 | if (!topology_enabled || !machine_has_topology) | |
43 | - return cpu_present_map; | |
44 | + return cpu_possible_map; | |
45 | spin_lock_irqsave(&topology_lock, flags); | |
46 | while (core) { | |
47 | if (cpu_isset(cpu, core->mask)) { | |
48 | @@ -171,7 +171,7 @@ static void topology_update_polarization | |
49 | int cpu; | |
50 | ||
51 | mutex_lock(&smp_cpu_state_mutex); | |
52 | - for_each_present_cpu(cpu) | |
53 | + for_each_possible_cpu(cpu) | |
54 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; | |
55 | mutex_unlock(&smp_cpu_state_mutex); | |
56 | } | |
57 | @@ -202,7 +202,7 @@ int topology_set_cpu_management(int fc) | |
58 | rc = ptf(PTF_HORIZONTAL); | |
59 | if (rc) | |
60 | return -EBUSY; | |
61 | - for_each_present_cpu(cpu) | |
62 | + for_each_possible_cpu(cpu) | |
63 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | |
64 | return rc; | |
65 | } | |
66 | @@ -211,11 +211,11 @@ static void update_cpu_core_map(void) | |
67 | { | |
68 | int cpu; | |
69 | ||
70 | - for_each_present_cpu(cpu) | |
71 | + for_each_possible_cpu(cpu) | |
72 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | |
73 | } | |
74 | ||
75 | -void arch_update_cpu_topology(void) | |
76 | +int arch_update_cpu_topology(void) | |
77 | { | |
78 | struct tl_info *info = tl_info; | |
79 | struct sys_device *sysdev; | |
80 | @@ -224,7 +224,7 @@ void arch_update_cpu_topology(void) | |
81 | if (!machine_has_topology) { | |
82 | update_cpu_core_map(); | |
83 | topology_update_polarization_simple(); | |
84 | - return; | |
85 | + return 0; | |
86 | } | |
87 | stsi(info, 15, 1, 2); | |
88 | tl_to_cores(info); | |
89 | @@ -233,11 +233,12 @@ void arch_update_cpu_topology(void) | |
90 | sysdev = get_cpu_sysdev(cpu); | |
91 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | |
92 | } | |
93 | + return 1; | |
94 | } | |
95 | ||
96 | static void topology_work_fn(struct work_struct *work) | |
97 | { | |
98 | - arch_reinit_sched_domains(); | |
99 | + rebuild_sched_domains(); | |
100 | } | |
101 | ||
102 | void topology_schedule_update(void) | |
103 | @@ -260,11 +261,6 @@ static void set_topology_timer(void) | |
104 | add_timer(&topology_timer); | |
105 | } | |
106 | ||
107 | -static void topology_interrupt(__u16 code) | |
108 | -{ | |
109 | - schedule_work(&topology_work); | |
110 | -} | |
111 | - | |
112 | static int __init early_parse_topology(char *p) | |
113 | { | |
114 | if (strncmp(p, "on", 2)) | |
115 | @@ -284,14 +280,7 @@ static int __init init_topology_update(v | |
116 | goto out; | |
117 | } | |
118 | init_timer_deferrable(&topology_timer); | |
119 | - if (machine_has_topology_irq) { | |
120 | - rc = register_external_interrupt(0x2005, topology_interrupt); | |
121 | - if (rc) | |
122 | - goto out; | |
123 | - ctl_set_bit(0, 8); | |
124 | - } | |
125 | - else | |
126 | - set_topology_timer(); | |
127 | + set_topology_timer(); | |
128 | out: | |
129 | update_cpu_core_map(); | |
130 | return rc; | |
131 | @@ -312,9 +301,6 @@ void __init s390_init_cpu_topology(void) | |
132 | return; | |
133 | machine_has_topology = 1; | |
134 | ||
135 | - if (facility_bits & (1ULL << 51)) | |
136 | - machine_has_topology_irq = 1; | |
137 | - | |
138 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | |
139 | info = tl_info; | |
140 | stsi(info, 15, 1, 2); | |
141 | @@ -338,5 +324,4 @@ void __init s390_init_cpu_topology(void) | |
142 | return; | |
143 | error: | |
144 | machine_has_topology = 0; | |
145 | - machine_has_topology_irq = 0; | |
146 | } | |
147 | --- a/include/linux/topology.h | |
148 | +++ b/include/linux/topology.h | |
149 | @@ -49,7 +49,7 @@ | |
150 | for_each_online_node(node) \ | |
151 | if (nr_cpus_node(node)) | |
152 | ||
153 | -void arch_update_cpu_topology(void); | |
154 | +int arch_update_cpu_topology(void); | |
155 | ||
156 | /* Conform to ACPI 2.0 SLIT distance definitions */ | |
157 | #define LOCAL_DISTANCE 10 | |
158 | --- a/kernel/sched.c | |
159 | +++ b/kernel/sched.c | |
160 | @@ -7640,8 +7640,14 @@ static struct sched_domain_attr *dattr_c | |
161 | */ | |
162 | static cpumask_t fallback_doms; | |
163 | ||
164 | -void __attribute__((weak)) arch_update_cpu_topology(void) | |
165 | +/* | |
166 | + * arch_update_cpu_topology lets virtualized architectures update the | |
167 | + * cpu core maps. It is supposed to return 1 if the topology changed | |
168 | + * or 0 if it stayed the same. | |
169 | + */ | |
170 | +int __attribute__((weak)) arch_update_cpu_topology(void) | |
171 | { | |
172 | + return 0; | |
173 | } | |
174 | ||
175 | /* | |
176 | @@ -7735,17 +7741,21 @@ void partition_sched_domains(int ndoms_n | |
177 | struct sched_domain_attr *dattr_new) | |
178 | { | |
179 | int i, j, n; | |
180 | + int top_changed; | |
181 | ||
182 | mutex_lock(&sched_domains_mutex); | |
183 | ||
184 | /* always unregister in case we don't destroy any domains */ | |
185 | unregister_sched_domain_sysctl(); | |
186 | ||
187 | + /* Let architecture update cpu core mappings. */ | |
188 | + top_changed = arch_update_cpu_topology(); | |
189 | + | |
190 | n = doms_new ? ndoms_new : 0; | |
191 | ||
192 | /* Destroy deleted domains */ | |
193 | for (i = 0; i < ndoms_cur; i++) { | |
194 | - for (j = 0; j < n; j++) { | |
195 | + for (j = 0; j < n && !top_changed; j++) { | |
196 | if (cpus_equal(doms_cur[i], doms_new[j]) | |
197 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | |
198 | goto match1; | |
199 | @@ -7765,7 +7775,7 @@ match1: | |
200 | ||
201 | /* Build new domains */ | |
202 | for (i = 0; i < ndoms_new; i++) { | |
203 | - for (j = 0; j < ndoms_cur; j++) { | |
204 | + for (j = 0; j < ndoms_cur && !top_changed; j++) { | |
205 | if (cpus_equal(doms_new[i], doms_cur[j]) | |
206 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | |
207 | goto match2; |