1 | From: Mike Travis <travis@sgi.com> |
2 | Subject: x86 cpumask: Updates to support NR_CPUS=4096 | |
3 | References: bnc#425240 FATE#304266 | |
4 | Patch-mainline: 2.6.28 | |
5 | ||
6 | Signed-off-by: Thomas Renninger <trenn@suse.de> | |
7 | ||
8 | * Add for_each_cpu_mask_and() function to eliminate need for a common use | |
9 | of a temporary cpumask_t variable. | |
10 | ||
11 | * Change genapic interfaces to accept cpumask_t pointers where possible. | |
12 | Modify external callers to use cpumask_t pointers in function calls. | |
13 | ||
14 | * Create new send_IPI_mask_allbutself which is the same as the | |
15 | send_IPI_mask functions but removes smp_processor_id() from list. | |
16 | This removes another common need for a temporary cpumask_t variable. | |
17 | ||
18 | * Use node_to_cpumask_ptr in place of node_to_cpumask to reduce stack | |
19 | requirements in sched.c. | |
20 | ||
21 | * Modify arch/x86/Kconfig to enable MAXSMP and 4096 cpus. | |
22 | ||
23 | Signed-off-by: Mike Travis <travis@sgi.com> | |
24 | Acked-by: Rusty Russell <rusty@rustcorp.com.au> | |
25 | Signed-off-by: Jiri Slaby <jslaby@suse.de> [bigsmp cpu_mask_to_apicid fix] | |
26 | Automatically created from "patches.arch/x86_sgi_cpus4096-05-update-send_IPI_mask.patch" by xen-port-patches.py | |
27 | ||
28 | Index: head-2008-11-25/arch/x86/kernel/genapic_xen_64.c | |
29 | =================================================================== | |
30 | --- head-2008-11-25.orig/arch/x86/kernel/genapic_xen_64.c 2008-11-25 14:37:42.000000000 +0100 | |
31 | +++ head-2008-11-25/arch/x86/kernel/genapic_xen_64.c 2008-11-25 13:12:11.000000000 +0100 | |
32 | @@ -34,9 +34,10 @@ static inline void __send_IPI_one(unsign | |
33 | notify_remote_via_irq(irq); | |
34 | } | |
35 | ||
36 | -static void xen_send_IPI_shortcut(unsigned int shortcut, int vector, | |
37 | - unsigned int dest) | |
38 | +static void xen_send_IPI_shortcut(unsigned int shortcut, | |
39 | + const cpumask_t *cpumask, int vector) | |
40 | { | |
41 | + unsigned long flags; | |
42 | int cpu; | |
43 | ||
44 | switch (shortcut) { | |
45 | @@ -44,20 +45,26 @@ static void xen_send_IPI_shortcut(unsign | |
46 | __send_IPI_one(smp_processor_id(), vector); | |
47 | break; | |
48 | case APIC_DEST_ALLBUT: | |
49 | + local_irq_save(flags); | |
50 | + WARN_ON(!cpus_subset(*cpumask, cpu_online_map)); | |
51 | for_each_possible_cpu(cpu) { | |
52 | if (cpu == smp_processor_id()) | |
53 | continue; | |
54 | - if (cpu_isset(cpu, cpu_online_map)) { | |
55 | + if (cpu_isset(cpu, *cpumask)) { | |
56 | __send_IPI_one(cpu, vector); | |
57 | } | |
58 | } | |
59 | + local_irq_restore(flags); | |
60 | break; | |
61 | case APIC_DEST_ALLINC: | |
62 | + local_irq_save(flags); | |
63 | + WARN_ON(!cpus_subset(*cpumask, cpu_online_map)); | |
64 | for_each_possible_cpu(cpu) { | |
65 | - if (cpu_isset(cpu, cpu_online_map)) { | |
66 | + if (cpu_isset(cpu, *cpumask)) { | |
67 | __send_IPI_one(cpu, vector); | |
68 | } | |
69 | } | |
70 | + local_irq_restore(flags); | |
71 | break; | |
72 | default: | |
73 | printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut, | |
74 | @@ -66,14 +73,15 @@ static void xen_send_IPI_shortcut(unsign | |
75 | } | |
76 | } | |
77 | ||
78 | -static cpumask_t xen_target_cpus(void) | |
79 | +static const cpumask_t *xen_target_cpus(void) | |
80 | { | |
81 | - return cpu_online_map; | |
82 | + return &cpu_online_map; | |
83 | } | |
84 | ||
85 | -static cpumask_t xen_vector_allocation_domain(int cpu) | |
86 | +static void xen_vector_allocation_domain(int cpu, cpumask_t *retmask) | |
87 | { | |
88 | - return cpumask_of_cpu(cpu); | |
89 | + cpus_clear(*retmask); | |
90 | + cpu_set(cpu, *retmask); | |
91 | } | |
92 | ||
93 | /* | |
94 | @@ -84,42 +92,30 @@ static void xen_init_apic_ldr(void) | |
95 | { | |
96 | } | |
97 | ||
98 | -static void xen_send_IPI_allbutself(int vector) | |
99 | +static void xen_send_IPI_mask(const cpumask_t *cpumask, int vector) | |
100 | { | |
101 | - /* | |
102 | - * if there are no other CPUs in the system then | |
103 | - * we get an APIC send error if we try to broadcast. | |
104 | - * thus we have to avoid sending IPIs in this case. | |
105 | - */ | |
106 | - if (num_online_cpus() > 1) | |
107 | - xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL); | |
108 | + xen_send_IPI_shortcut(APIC_DEST_ALLINC, cpumask, vector); | |
109 | } | |
110 | ||
111 | -static void xen_send_IPI_all(int vector) | |
112 | +static void xen_send_IPI_mask_allbutself(const cpumask_t *cpumask, | |
113 | + int vector) | |
114 | { | |
115 | - xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); | |
116 | + xen_send_IPI_shortcut(APIC_DEST_ALLBUT, cpumask, vector); | |
117 | } | |
118 | ||
119 | -static void xen_send_IPI_mask(cpumask_t cpumask, int vector) | |
120 | +static void xen_send_IPI_allbutself(int vector) | |
121 | { | |
122 | - unsigned long mask = cpus_addr(cpumask)[0]; | |
123 | - unsigned int cpu; | |
124 | - unsigned long flags; | |
125 | - | |
126 | - local_irq_save(flags); | |
127 | - WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); | |
128 | + xen_send_IPI_shortcut(APIC_DEST_ALLBUT, &cpu_online_map, vector); | |
129 | +} | |
130 | ||
131 | - for_each_possible_cpu(cpu) { | |
132 | - if (cpu_isset(cpu, cpumask)) { | |
133 | - __send_IPI_one(cpu, vector); | |
134 | - } | |
135 | - } | |
136 | - local_irq_restore(flags); | |
137 | +static void xen_send_IPI_all(int vector) | |
138 | +{ | |
139 | + xen_send_IPI_shortcut(APIC_DEST_ALLINC, &cpu_online_map, vector); | |
140 | } | |
141 | ||
142 | static void xen_send_IPI_self(int vector) | |
143 | { | |
144 | - xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); | |
145 | + xen_send_IPI_shortcut(APIC_DEST_SELF, NULL, vector); | |
146 | } | |
147 | ||
148 | #ifdef CONFIG_XEN_PRIVILEGED_GUEST | |
149 | @@ -130,9 +126,9 @@ static int xen_apic_id_registered(void) | |
150 | } | |
151 | #endif | |
152 | ||
153 | -static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask) | |
154 | +static unsigned int xen_cpu_mask_to_apicid(const cpumask_t *cpumask) | |
155 | { | |
156 | - return cpus_addr(cpumask)[0] & APIC_ALL_CPUS; | |
157 | + return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS; | |
158 | } | |
159 | ||
160 | static unsigned int phys_pkg_id(int index_msb) | |
161 | @@ -158,6 +154,7 @@ struct genapic apic_xen = { | |
162 | .send_IPI_all = xen_send_IPI_all, | |
163 | .send_IPI_allbutself = xen_send_IPI_allbutself, | |
164 | .send_IPI_mask = xen_send_IPI_mask, | |
165 | + .send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself, | |
166 | .send_IPI_self = xen_send_IPI_self, | |
167 | .cpu_mask_to_apicid = xen_cpu_mask_to_apicid, | |
168 | .phys_pkg_id = phys_pkg_id, | |
169 | Index: head-2008-11-25/arch/x86/kernel/io_apic_32-xen.c | |
170 | =================================================================== | |
171 | --- head-2008-11-25.orig/arch/x86/kernel/io_apic_32-xen.c 2008-11-25 14:37:42.000000000 +0100 | |
172 | +++ head-2008-11-25/arch/x86/kernel/io_apic_32-xen.c 2008-11-25 14:37:47.000000000 +0100 | |
173 | @@ -393,11 +393,11 @@ static void set_ioapic_affinity_irq(unsi | |
174 | ||
175 | cpus_and(tmp, cpumask, cpu_online_map); | |
176 | if (cpus_empty(tmp)) | |
177 | - tmp = TARGET_CPUS; | |
178 | + tmp = *TARGET_CPUS; | |
179 | ||
180 | cpus_and(cpumask, tmp, CPU_MASK_ALL); | |
181 | ||
182 | - apicid_value = cpu_mask_to_apicid(cpumask); | |
183 | + apicid_value = cpu_mask_to_apicid(&cpumask); | |
184 | /* Prepare to do the io_apic_write */ | |
185 | apicid_value = apicid_value << 24; | |
186 | spin_lock_irqsave(&ioapic_lock, flags); | |
187 | @@ -981,7 +981,7 @@ void __init setup_ioapic_dest(void) | |
188 | if (irq_entry == -1) | |
189 | continue; | |
190 | irq = pin_2_irq(irq_entry, ioapic, pin); | |
191 | - set_ioapic_affinity_irq(irq, TARGET_CPUS); | |
192 | + set_ioapic_affinity_irq(irq, *TARGET_CPUS); | |
193 | } | |
194 | ||
195 | } | |
196 | @@ -2602,13 +2602,13 @@ static void set_msi_irq_affinity(unsigne | |
197 | ||
198 | cpus_and(tmp, mask, cpu_online_map); | |
199 | if (cpus_empty(tmp)) | |
200 | - tmp = TARGET_CPUS; | |
201 | + tmp = *TARGET_CPUS; | |
202 | ||
203 | vector = assign_irq_vector(irq); | |
204 | if (vector < 0) | |
205 | return; | |
206 | ||
207 | - dest = cpu_mask_to_apicid(mask); | |
208 | + dest = cpu_mask_to_apicid(&mask); | |
209 | ||
210 | read_msi_msg(irq, &msg); | |
211 | ||
212 | @@ -2695,11 +2695,11 @@ static void set_ht_irq_affinity(unsigned | |
213 | ||
214 | cpus_and(tmp, mask, cpu_online_map); | |
215 | if (cpus_empty(tmp)) | |
216 | - tmp = TARGET_CPUS; | |
217 | + tmp = *TARGET_CPUS; | |
218 | ||
219 | cpus_and(mask, tmp, CPU_MASK_ALL); | |
220 | ||
221 | - dest = cpu_mask_to_apicid(mask); | |
222 | + dest = cpu_mask_to_apicid(&mask); | |
223 | ||
224 | target_ht_irq(irq, dest); | |
225 | irq_desc[irq].affinity = mask; | |
226 | @@ -2729,7 +2729,7 @@ int arch_setup_ht_irq(unsigned int irq, | |
227 | ||
228 | cpus_clear(tmp); | |
229 | cpu_set(vector >> 8, tmp); | |
230 | - dest = cpu_mask_to_apicid(tmp); | |
231 | + dest = cpu_mask_to_apicid(&tmp); | |
232 | ||
233 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); | |
234 | ||
235 | Index: head-2008-11-25/arch/x86/kernel/io_apic_64-xen.c | |
236 | =================================================================== | |
237 | --- head-2008-11-25.orig/arch/x86/kernel/io_apic_64-xen.c 2008-11-25 14:37:42.000000000 +0100 | |
238 | +++ head-2008-11-25/arch/x86/kernel/io_apic_64-xen.c 2008-11-25 14:29:21.000000000 +0100 | |
239 | @@ -66,7 +66,7 @@ struct irq_cfg { | |
240 | /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ | |
241 | static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly; | |
242 | ||
243 | -static int assign_irq_vector(int irq, cpumask_t mask); | |
244 | +static int assign_irq_vector(int irq, const cpumask_t *mask); | |
245 | ||
246 | #ifndef CONFIG_XEN | |
247 | int first_system_vector = 0xfe; | |
248 | @@ -337,11 +337,11 @@ static void set_ioapic_affinity_irq(unsi | |
249 | if (cpus_empty(tmp)) | |
250 | return; | |
251 | ||
252 | - if (assign_irq_vector(irq, mask)) | |
253 | + if (assign_irq_vector(irq, &mask)) | |
254 | return; | |
255 | ||
256 | cpus_and(tmp, cfg->domain, mask); | |
257 | - dest = cpu_mask_to_apicid(tmp); | |
258 | + dest = cpu_mask_to_apicid(&tmp); | |
259 | ||
260 | /* | |
261 | * Only the high 8 bits are valid. | |
262 | @@ -749,7 +749,7 @@ void unlock_vector_lock(void) | |
263 | spin_unlock(&vector_lock); | |
264 | } | |
265 | ||
266 | -static int __assign_irq_vector(int irq, cpumask_t mask) | |
267 | +static int __assign_irq_vector(int irq, const cpumask_t *mask) | |
268 | { | |
269 | struct physdev_irq irq_op; | |
270 | struct irq_cfg *cfg; | |
271 | @@ -776,7 +776,7 @@ static int __assign_irq_vector(int irq, | |
272 | return 0; | |
273 | } | |
274 | ||
275 | -static int assign_irq_vector(int irq, cpumask_t mask) | |
276 | +static int assign_irq_vector(int irq, const cpumask_t *mask) | |
277 | { | |
278 | int err; | |
279 | unsigned long flags; | |
280 | @@ -858,8 +858,8 @@ static void setup_IO_APIC_irq(int apic, | |
281 | if (!IO_APIC_IRQ(irq)) | |
282 | return; | |
283 | ||
284 | - mask = TARGET_CPUS; | |
285 | - if (assign_irq_vector(irq, mask)) | |
286 | + mask = *TARGET_CPUS; | |
287 | + if (assign_irq_vector(irq, &mask)) | |
288 | return; | |
289 | ||
290 | #ifndef CONFIG_XEN | |
291 | @@ -879,7 +879,7 @@ static void setup_IO_APIC_irq(int apic, | |
292 | ||
293 | entry.delivery_mode = INT_DELIVERY_MODE; | |
294 | entry.dest_mode = INT_DEST_MODE; | |
295 | - entry.dest = cpu_mask_to_apicid(mask); | |
296 | + entry.dest = cpu_mask_to_apicid(&mask); | |
297 | entry.mask = 0; /* enable IRQ */ | |
298 | entry.trigger = trigger; | |
299 | entry.polarity = polarity; | |
300 | @@ -1385,7 +1385,7 @@ static int ioapic_retrigger_irq(unsigned | |
301 | unsigned long flags; | |
302 | ||
303 | spin_lock_irqsave(&vector_lock, flags); | |
304 | - send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); | |
305 | + send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); | |
306 | spin_unlock_irqrestore(&vector_lock, flags); | |
307 | ||
308 | return 1; | |
309 | @@ -1450,7 +1450,7 @@ static void irq_complete_move(unsigned i | |
310 | ||
311 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | |
312 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | |
313 | - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | |
314 | + send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | |
315 | cfg->move_in_progress = 0; | |
316 | } | |
317 | } | |
318 | @@ -2009,11 +2009,11 @@ static int msi_compose_msg(struct pci_de | |
319 | unsigned dest; | |
320 | cpumask_t tmp; | |
321 | ||
322 | - tmp = TARGET_CPUS; | |
323 | - err = assign_irq_vector(irq, tmp); | |
324 | + tmp = *TARGET_CPUS; | |
325 | + err = assign_irq_vector(irq, &tmp); | |
326 | if (!err) { | |
327 | cpus_and(tmp, cfg->domain, tmp); | |
328 | - dest = cpu_mask_to_apicid(tmp); | |
329 | + dest = cpu_mask_to_apicid(&tmp); | |
330 | ||
331 | msg->address_hi = MSI_ADDR_BASE_HI; | |
332 | msg->address_lo = | |
333 | @@ -2049,11 +2049,11 @@ static void set_msi_irq_affinity(unsigne | |
334 | if (cpus_empty(tmp)) | |
335 | return; | |
336 | ||
337 | - if (assign_irq_vector(irq, mask)) | |
338 | + if (assign_irq_vector(irq, &mask)) | |
339 | return; | |
340 | ||
341 | cpus_and(tmp, cfg->domain, mask); | |
342 | - dest = cpu_mask_to_apicid(tmp); | |
343 | + dest = cpu_mask_to_apicid(&tmp); | |
344 | ||
345 | read_msi_msg(irq, &msg); | |
346 | ||
347 | @@ -2122,11 +2122,11 @@ static void dmar_msi_set_affinity(unsign | |
348 | if (cpus_empty(tmp)) | |
349 | return; | |
350 | ||
351 | - if (assign_irq_vector(irq, mask)) | |
352 | + if (assign_irq_vector(irq, &mask)) | |
353 | return; | |
354 | ||
355 | cpus_and(tmp, cfg->domain, mask); | |
356 | - dest = cpu_mask_to_apicid(tmp); | |
357 | + dest = cpu_mask_to_apicid(&tmp); | |
358 | ||
359 | dmar_msi_read(irq, &msg); | |
360 | ||
361 | @@ -2198,11 +2198,11 @@ static void set_ht_irq_affinity(unsigned | |
362 | if (cpus_empty(tmp)) | |
363 | return; | |
364 | ||
365 | - if (assign_irq_vector(irq, mask)) | |
366 | + if (assign_irq_vector(irq, &mask)) | |
367 | return; | |
368 | ||
369 | cpus_and(tmp, cfg->domain, mask); | |
370 | - dest = cpu_mask_to_apicid(tmp); | |
371 | + dest = cpu_mask_to_apicid(&tmp); | |
372 | ||
373 | target_ht_irq(irq, dest, cfg->vector); | |
374 | irq_desc[irq].affinity = mask; | |
375 | @@ -2226,14 +2226,14 @@ int arch_setup_ht_irq(unsigned int irq, | |
376 | int err; | |
377 | cpumask_t tmp; | |
378 | ||
379 | - tmp = TARGET_CPUS; | |
380 | - err = assign_irq_vector(irq, tmp); | |
381 | + tmp = *TARGET_CPUS; | |
382 | + err = assign_irq_vector(irq, &tmp); | |
383 | if (!err) { | |
384 | struct ht_irq_msg msg; | |
385 | unsigned dest; | |
386 | ||
387 | cpus_and(tmp, cfg->domain, tmp); | |
388 | - dest = cpu_mask_to_apicid(tmp); | |
389 | + dest = cpu_mask_to_apicid(&tmp); | |
390 | ||
391 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); | |
392 | ||
393 | @@ -2351,7 +2351,7 @@ void __init setup_ioapic_dest(void) | |
394 | irq_trigger(irq_entry), | |
395 | irq_polarity(irq_entry)); | |
396 | else | |
397 | - set_ioapic_affinity_irq(irq, TARGET_CPUS); | |
398 | + set_ioapic_affinity_irq(irq, *TARGET_CPUS); | |
399 | } | |
400 | ||
401 | } | |
402 | Index: head-2008-11-25/arch/x86/kernel/ipi-xen.c | |
403 | =================================================================== | |
404 | --- head-2008-11-25.orig/arch/x86/kernel/ipi-xen.c 2008-11-25 14:37:42.000000000 +0100 | |
405 | +++ head-2008-11-25/arch/x86/kernel/ipi-xen.c 2008-11-25 13:12:11.000000000 +0100 | |
406 | @@ -148,12 +148,11 @@ static inline void __send_IPI_dest_field | |
407 | /* | |
408 | * This is only used on smaller machines. | |
409 | */ | |
410 | -void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) | |
411 | +void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector) | |
412 | { | |
413 | #ifndef CONFIG_XEN | |
414 | - unsigned long mask = cpus_addr(cpumask)[0]; | |
415 | + unsigned long mask = cpus_addr(*cpumask)[0]; | |
416 | #else | |
417 | - cpumask_t mask; | |
418 | unsigned int cpu; | |
419 | #endif | |
420 | unsigned long flags; | |
421 | @@ -163,16 +162,15 @@ void send_IPI_mask_bitmask(cpumask_t cpu | |
422 | WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); | |
423 | __send_IPI_dest_field(mask, vector); | |
424 | #else | |
425 | - cpus_andnot(mask, cpumask, cpu_online_map); | |
426 | - WARN_ON(!cpus_empty(mask)); | |
427 | + WARN_ON(!cpus_subset(*cpumask, cpu_online_map)); | |
428 | for_each_online_cpu(cpu) | |
429 | - if (cpu_isset(cpu, cpumask)) | |
430 | + if (cpu_isset(cpu, *cpumask)) | |
431 | __send_IPI_one(cpu, vector); | |
432 | #endif | |
433 | local_irq_restore(flags); | |
434 | } | |
435 | ||
436 | -void send_IPI_mask_sequence(cpumask_t mask, int vector) | |
437 | +void send_IPI_mask_sequence(const cpumask_t *mask, int vector) | |
438 | { | |
439 | #ifndef CONFIG_XEN | |
440 | unsigned long flags; | |
441 | @@ -185,15 +183,34 @@ void send_IPI_mask_sequence(cpumask_t ma | |
442 | */ | |
443 | ||
444 | local_irq_save(flags); | |
445 | - for_each_possible_cpu(query_cpu) { | |
446 | - if (cpu_isset(query_cpu, mask)) { | |
447 | + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map) | |
448 | + __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); | |
449 | + local_irq_restore(flags); | |
450 | +#else | |
451 | + send_IPI_mask_bitmask(mask, vector); | |
452 | +#endif | |
453 | +} | |
454 | + | |
455 | +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector) | |
456 | +{ | |
457 | +#ifndef CONFIG_XEN | |
458 | + unsigned long flags; | |
459 | + unsigned int query_cpu; | |
460 | + unsigned int this_cpu = smp_processor_id(); | |
461 | + | |
462 | + /* See Hack comment above */ | |
463 | + | |
464 | + local_irq_save(flags); | |
465 | + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map) | |
466 | + if (query_cpu != this_cpu) | |
467 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), | |
468 | vector); | |
469 | - } | |
470 | - } | |
471 | local_irq_restore(flags); | |
472 | #else | |
473 | - send_IPI_mask_bitmask(mask, vector); | |
474 | + cpumask_t allbut = *mask; | |
475 | + | |
476 | + cpu_clear(smp_processor_id(), allbut); | |
477 | + send_IPI_mask_bitmask(&allbut, vector); | |
478 | #endif | |
479 | } | |
480 | ||
481 | Index: head-2008-11-25/arch/x86/kernel/smp-xen.c | |
482 | =================================================================== | |
483 | --- head-2008-11-25.orig/arch/x86/kernel/smp-xen.c 2008-11-04 11:59:11.000000000 +0100 | |
484 | +++ head-2008-11-25/arch/x86/kernel/smp-xen.c 2008-11-25 13:12:11.000000000 +0100 | |
485 | @@ -118,17 +118,17 @@ void xen_smp_send_reschedule(int cpu) | |
486 | WARN_ON(1); | |
487 | return; | |
488 | } | |
489 | - send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | |
490 | + send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | |
491 | } | |
492 | ||
493 | void xen_send_call_func_single_ipi(int cpu) | |
494 | { | |
495 | - send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_VECTOR); | |
496 | + send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_VECTOR); | |
497 | } | |
498 | ||
499 | void xen_send_call_func_ipi(const cpumask_t *mask) | |
500 | { | |
501 | - send_IPI_mask(*mask, CALL_FUNCTION_VECTOR); | |
502 | + send_IPI_mask_allbutself(mask, CALL_FUNCTION_VECTOR); | |
503 | } | |
504 | ||
505 | static void stop_this_cpu(void *dummy) |