src/patches/suse-2.6.27.25/patches.arch/x86_sgi_cpus4096-05-update-send_IPI_mask.patch
1 From: Mike Travis <travis@sgi.com>
2 Subject: x86 cpumask: Updates to support NR_CPUS=4096
3 References: bnc#425240 FATE304266
4 Patch-mainline: 2.6.28
5
6 Signed-off-by: Thomas Renninger <trenn@suse.de>
7
8 * Add a for_each_cpu_mask_and() function to eliminate a common need for a
9   temporary cpumask_t variable (see the sketch after this list).
10
11 * Change genapic interfaces to accept cpumask_t pointers where possible.
12 Modify external callers to use cpumask_t pointers in function calls.
13
14 * Create a new send_IPI_mask_allbutself() which is the same as the
15   send_IPI_mask functions but excludes smp_processor_id() from the list.
16   This removes another common need for a temporary cpumask_t variable.
17
18 * Use node_to_cpumask_ptr in place of node_to_cpumask to reduce stack
19 requirements in sched.c.
20
21 * Modify arch/x86/Kconfig to enable MAXSMP and 4096 cpus.
22
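Illustrative only (not part of the patch): a minimal before/after sketch of the
caller-side pattern these helpers replace. for_each_cpu_mask_and() is introduced
by this series; do_something() and ipi_each_online() are placeholder names, not
kernel APIs.

	/* Before: intersecting with cpu_online_map needs a cpumask_t on the
	 * stack, which is 512 bytes with NR_CPUS=4096. */
	static void ipi_each_online(const cpumask_t *mask, int vector)
	{
		cpumask_t tmp;
		int cpu;

		cpus_and(tmp, *mask, cpu_online_map);
		for_each_cpu_mask_nr(cpu, tmp)
			do_something(cpu, vector);
	}

	/* After: iterate the intersection directly, no temporary mask. */
	static void ipi_each_online(const cpumask_t *mask, int vector)
	{
		int cpu;

		for_each_cpu_mask_and(cpu, *mask, cpu_online_map)
			do_something(cpu, vector);
	}
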
23 Signed-off-by: Mike Travis <travis@sgi.com>
24 Acked-by: Rusty Russell <rusty@rustcorp.com.au>
25 Signed-off-by: Jiri Slaby <jslaby@suse.de> [bigsmp cpu_mask_to_apicid fix]
26 ---
27 arch/x86/Kconfig | 11 +--
28 arch/x86/kernel/apic_32.c | 2
29 arch/x86/kernel/apic_64.c | 2
30 arch/x86/kernel/crash.c | 5 -
31 arch/x86/kernel/genapic_flat_64.c | 76 ++++++++++++++++--------
32 arch/x86/kernel/genx2apic_cluster.c | 60 +++++++++++++------
33 arch/x86/kernel/genx2apic_phys.c | 55 ++++++++++++-----
34 arch/x86/kernel/genx2apic_uv_x.c | 43 ++++++++------
35 arch/x86/kernel/io_apic_32.c | 16 ++---
36 arch/x86/kernel/io_apic_64.c | 95 +++++++++++++++----------------
37 arch/x86/kernel/ipi.c | 26 ++++++--
38 arch/x86/kernel/smp.c | 15 ----
39 arch/x86/kernel/tlb_32.c | 2
40 arch/x86/kernel/tlb_64.c | 2
41 arch/x86/xen/smp.c | 15 ++--
42 include/asm-x86/genapic_32.h | 8 +-
43 include/asm-x86/genapic_64.h | 11 ++-
44 include/asm-x86/ipi.h | 22 ++++++-
45 include/asm-x86/mach-bigsmp/mach_apic.h | 8 +-
46 include/asm-x86/mach-bigsmp/mach_ipi.h | 21 ++++--
47 include/asm-x86/mach-default/mach_apic.h | 12 +--
48 include/asm-x86/mach-default/mach_ipi.h | 18 ++---
49 include/asm-x86/mach-es7000/mach_apic.h | 8 +-
50 include/asm-x86/mach-es7000/mach_ipi.h | 20 ++++--
51 include/asm-x86/mach-generic/mach_ipi.h | 1
52 include/asm-x86/mach-numaq/mach_apic.h | 6 -
53 include/asm-x86/mach-numaq/mach_ipi.h | 22 ++++---
54 include/asm-x86/mach-summit/mach_apic.h | 6 -
55 include/asm-x86/mach-summit/mach_ipi.h | 22 ++++---
56 29 files changed, 363 insertions(+), 247 deletions(-)
57
58 --- a/arch/x86/Kconfig
59 +++ b/arch/x86/Kconfig
60 @@ -584,15 +584,15 @@ config IOMMU_HELPER
61
62 config MAXSMP
63 bool "Configure Maximum number of SMP Processors and NUMA Nodes"
64 - depends on X86_64 && SMP && BROKEN
65 + depends on X86_64 && SMP
66 default n
67 help
68 Configure maximum number of CPUS and NUMA Nodes for this architecture.
69 If unsure, say N.
70
71 config NR_CPUS
72 - int "Maximum number of CPUs (2-512)" if !MAXSMP
73 - range 2 512
74 + int "Maximum number of CPUs (2-4096)"
75 + range 2 4096
76 depends on SMP
77 default "4096" if MAXSMP
78 default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
79 @@ -603,7 +603,7 @@ config NR_CPUS
80 minimum value which makes sense is 2.
81
82 This is purely to save memory - each supported CPU adds
83 - approximately eight kilobytes to the kernel image.
84 + approximately one kilobyte to the kernel image.
85
86 config SCHED_SMT
87 bool "SMT (Hyperthreading) scheduler support"
88 @@ -1019,7 +1019,8 @@ config NUMA_EMU
89 number of nodes. This is only useful for debugging.
90
91 config NODES_SHIFT
92 - int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
93 + int "Maximum NUMA Nodes (as a power of 2)"
94 + range 9 9 if MAXSMP
95 range 1 9 if X86_64
96 default "9" if MAXSMP
97 default "6" if X86_64
98 --- a/arch/x86/kernel/apic_32.c
99 +++ b/arch/x86/kernel/apic_32.c
100 @@ -319,7 +319,7 @@ static void lapic_timer_setup(enum clock
101 static void lapic_timer_broadcast(cpumask_t mask)
102 {
103 #ifdef CONFIG_SMP
104 - send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
105 + send_IPI_mask(&mask, LOCAL_TIMER_VECTOR);
106 #endif
107 }
108
109 --- a/arch/x86/kernel/apic_64.c
110 +++ b/arch/x86/kernel/apic_64.c
111 @@ -351,7 +351,7 @@ static void lapic_timer_setup(enum clock
112 static void lapic_timer_broadcast(cpumask_t mask)
113 {
114 #ifdef CONFIG_SMP
115 - send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
116 + send_IPI_mask(&mask, LOCAL_TIMER_VECTOR);
117 #endif
118 }
119
120 --- a/arch/x86/kernel/crash.c
121 +++ b/arch/x86/kernel/crash.c
122 @@ -77,10 +77,7 @@ static int crash_nmi_callback(struct not
123
124 static void smp_send_nmi_allbutself(void)
125 {
126 - cpumask_t mask = cpu_online_map;
127 - cpu_clear(safe_smp_processor_id(), mask);
128 - if (!cpus_empty(mask))
129 - send_IPI_mask(mask, NMI_VECTOR);
130 + send_IPI_allbutself(NMI_VECTOR);
131 }
132
133 static struct notifier_block crash_nmi_nb = {
134 --- a/arch/x86/kernel/genapic_flat_64.c
135 +++ b/arch/x86/kernel/genapic_flat_64.c
136 @@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char
137 return 1;
138 }
139
140 -static cpumask_t flat_target_cpus(void)
141 +static const cpumask_t *flat_target_cpus(void)
142 {
143 - return cpu_online_map;
144 + return &cpu_online_map;
145 }
146
147 -static cpumask_t flat_vector_allocation_domain(int cpu)
148 +static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask)
149 {
150 /* Careful. Some cpus do not strictly honor the set of cpus
151 * specified in the interrupt destination when using lowest
152 @@ -45,8 +45,7 @@ static cpumask_t flat_vector_allocation_
153 * deliver interrupts to the wrong hyperthread when only one
154 * hyperthread was specified in the interrupt desitination.
155 */
156 - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
157 - return domain;
158 + *retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } };
159 }
160
161 /*
162 @@ -69,9 +68,8 @@ static void flat_init_apic_ldr(void)
163 apic_write(APIC_LDR, val);
164 }
165
166 -static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
167 +static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
168 {
169 - unsigned long mask = cpus_addr(cpumask)[0];
170 unsigned long flags;
171
172 local_irq_save(flags);
173 @@ -79,20 +77,40 @@ static void flat_send_IPI_mask(cpumask_t
174 local_irq_restore(flags);
175 }
176
177 +static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector)
178 +{
179 + unsigned long mask = cpus_addr(*cpumask)[0];
180 +
181 + _flat_send_IPI_mask(mask, vector);
182 +}
183 +
184 +static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector)
185 +{
186 + unsigned long mask = cpus_addr(*cpumask)[0];
187 + int cpu = smp_processor_id();
188 +
189 + if (cpu < BITS_PER_LONG)
190 + clear_bit(cpu, &mask);
191 + _flat_send_IPI_mask(mask, vector);
192 +}
193 +
194 static void flat_send_IPI_allbutself(int vector)
195 {
196 + int cpu = smp_processor_id();
197 #ifdef CONFIG_HOTPLUG_CPU
198 int hotplug = 1;
199 #else
200 int hotplug = 0;
201 #endif
202 if (hotplug || vector == NMI_VECTOR) {
203 - cpumask_t allbutme = cpu_online_map;
204 + if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) {
205 + unsigned long mask = cpus_addr(cpu_online_map)[0];
206
207 - cpu_clear(smp_processor_id(), allbutme);
208 + if (cpu < BITS_PER_LONG)
209 + clear_bit(cpu, &mask);
210
211 - if (!cpus_empty(allbutme))
212 - flat_send_IPI_mask(allbutme, vector);
213 + _flat_send_IPI_mask(mask, vector);
214 + }
215 } else if (num_online_cpus() > 1) {
216 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
217 }
218 @@ -101,7 +119,7 @@ static void flat_send_IPI_allbutself(int
219 static void flat_send_IPI_all(int vector)
220 {
221 if (vector == NMI_VECTOR)
222 - flat_send_IPI_mask(cpu_online_map, vector);
223 + flat_send_IPI_mask(&cpu_online_map, vector);
224 else
225 __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
226 }
227 @@ -135,9 +153,9 @@ static int flat_apic_id_registered(void)
228 return physid_isset(read_xapic_id(), phys_cpu_present_map);
229 }
230
231 -static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
232 +static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
233 {
234 - return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
235 + return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
236 }
237
238 static unsigned int phys_pkg_id(int index_msb)
239 @@ -157,6 +175,7 @@ struct genapic apic_flat = {
240 .send_IPI_all = flat_send_IPI_all,
241 .send_IPI_allbutself = flat_send_IPI_allbutself,
242 .send_IPI_mask = flat_send_IPI_mask,
243 + .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
244 .send_IPI_self = apic_send_IPI_self,
245 .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
246 .phys_pkg_id = phys_pkg_id,
247 @@ -186,35 +205,39 @@ static int physflat_acpi_madt_oem_check(
248 return 0;
249 }
250
251 -static cpumask_t physflat_target_cpus(void)
252 +static const cpumask_t *physflat_target_cpus(void)
253 {
254 - return cpu_online_map;
255 + return &cpu_online_map;
256 }
257
258 -static cpumask_t physflat_vector_allocation_domain(int cpu)
259 +static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask)
260 {
261 - return cpumask_of_cpu(cpu);
262 + cpus_clear(*retmask);
263 + cpu_set(cpu, *retmask);
264 }
265
266 -static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
267 +static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector)
268 {
269 send_IPI_mask_sequence(cpumask, vector);
270 }
271
272 -static void physflat_send_IPI_allbutself(int vector)
273 +static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask,
274 + int vector)
275 {
276 - cpumask_t allbutme = cpu_online_map;
277 + send_IPI_mask_allbutself(cpumask, vector);
278 +}
279
280 - cpu_clear(smp_processor_id(), allbutme);
281 - physflat_send_IPI_mask(allbutme, vector);
282 +static void physflat_send_IPI_allbutself(int vector)
283 +{
284 + send_IPI_mask_allbutself(&cpu_online_map, vector);
285 }
286
287 static void physflat_send_IPI_all(int vector)
288 {
289 - physflat_send_IPI_mask(cpu_online_map, vector);
290 + physflat_send_IPI_mask(&cpu_online_map, vector);
291 }
292
293 -static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
294 +static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
295 {
296 int cpu;
297
298 @@ -222,7 +245,7 @@ static unsigned int physflat_cpu_mask_to
299 * We're using fixed IRQ delivery, can only return one phys APIC ID.
300 * May as well be the first.
301 */
302 - cpu = first_cpu(cpumask);
303 + cpu = first_cpu(*cpumask);
304 if ((unsigned)cpu < nr_cpu_ids)
305 return per_cpu(x86_cpu_to_apicid, cpu);
306 else
307 @@ -241,6 +264,7 @@ struct genapic apic_physflat = {
308 .send_IPI_all = physflat_send_IPI_all,
309 .send_IPI_allbutself = physflat_send_IPI_allbutself,
310 .send_IPI_mask = physflat_send_IPI_mask,
311 + .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
312 .send_IPI_self = apic_send_IPI_self,
313 .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
314 .phys_pkg_id = phys_pkg_id,
315 --- a/arch/x86/kernel/genx2apic_cluster.c
316 +++ b/arch/x86/kernel/genx2apic_cluster.c
317 @@ -19,19 +19,18 @@ static int x2apic_acpi_madt_oem_check(ch
318
319 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
320
321 -static cpumask_t x2apic_target_cpus(void)
322 +static const cpumask_t *x2apic_target_cpus(void)
323 {
324 - return cpumask_of_cpu(0);
325 + return &cpumask_of_cpu(0);
326 }
327
328 /*
329 * for now each logical cpu is in its own vector allocation domain.
330 */
331 -static cpumask_t x2apic_vector_allocation_domain(int cpu)
332 +static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
333 {
334 - cpumask_t domain = CPU_MASK_NONE;
335 - cpu_set(cpu, domain);
336 - return domain;
337 + cpus_clear(*retmask);
338 + cpu_set(cpu, *retmask);
339 }
340
341 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
342 @@ -53,32 +52,52 @@ static void __x2apic_send_IPI_dest(unsig
343 * at once. We have 16 cpu's in a cluster. This will minimize IPI register
344 * writes.
345 */
346 -static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
347 +static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
348 {
349 unsigned long flags;
350 unsigned long query_cpu;
351
352 local_irq_save(flags);
353 - for_each_cpu_mask(query_cpu, mask) {
354 - __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
355 - vector, APIC_DEST_LOGICAL);
356 - }
357 + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map)
358 + __x2apic_send_IPI_dest(
359 + per_cpu(x86_cpu_to_logical_apicid, query_cpu),
360 + vector, APIC_DEST_LOGICAL);
361 local_irq_restore(flags);
362 }
363
364 -static void x2apic_send_IPI_allbutself(int vector)
365 +static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
366 {
367 - cpumask_t mask = cpu_online_map;
368 + unsigned long flags;
369 + unsigned long query_cpu;
370 + unsigned long this_cpu = smp_processor_id();
371
372 - cpu_clear(smp_processor_id(), mask);
373 + local_irq_save(flags);
374 + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map)
375 + if (query_cpu != this_cpu)
376 + __x2apic_send_IPI_dest(
377 + per_cpu(x86_cpu_to_logical_apicid, query_cpu),
378 + vector, APIC_DEST_LOGICAL);
379 + local_irq_restore(flags);
380 +}
381
382 - if (!cpus_empty(mask))
383 - x2apic_send_IPI_mask(mask, vector);
384 +static void x2apic_send_IPI_allbutself(int vector)
385 +{
386 + unsigned long flags;
387 + unsigned long query_cpu;
388 + unsigned long this_cpu = smp_processor_id();
389 +
390 + local_irq_save(flags);
391 + for_each_online_cpu(query_cpu)
392 + if (query_cpu != this_cpu)
393 + __x2apic_send_IPI_dest(
394 + per_cpu(x86_cpu_to_logical_apicid, query_cpu),
395 + vector, APIC_DEST_LOGICAL);
396 + local_irq_restore(flags);
397 }
398
399 static void x2apic_send_IPI_all(int vector)
400 {
401 - x2apic_send_IPI_mask(cpu_online_map, vector);
402 + x2apic_send_IPI_mask(&cpu_online_map, vector);
403 }
404
405 static int x2apic_apic_id_registered(void)
406 @@ -86,7 +105,7 @@ static int x2apic_apic_id_registered(voi
407 return 1;
408 }
409
410 -static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
411 +static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
412 {
413 int cpu;
414
415 @@ -94,8 +113,8 @@ static unsigned int x2apic_cpu_mask_to_a
416 * We're using fixed IRQ delivery, can only return one phys APIC ID.
417 * May as well be the first.
418 */
419 - cpu = first_cpu(cpumask);
420 - if ((unsigned)cpu < NR_CPUS)
421 + cpu = first_cpu(*cpumask);
422 + if ((unsigned)cpu < nr_cpu_ids)
423 return per_cpu(x86_cpu_to_logical_apicid, cpu);
424 else
425 return BAD_APICID;
426 @@ -147,6 +166,7 @@ struct genapic apic_x2apic_cluster = {
427 .send_IPI_all = x2apic_send_IPI_all,
428 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
429 .send_IPI_mask = x2apic_send_IPI_mask,
430 + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
431 .send_IPI_self = x2apic_send_IPI_self,
432 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
433 .phys_pkg_id = phys_pkg_id,
434 --- a/arch/x86/kernel/genx2apic_phys.c
435 +++ b/arch/x86/kernel/genx2apic_phys.c
436 @@ -31,16 +31,15 @@ static int x2apic_acpi_madt_oem_check(ch
437
438 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
439
440 -static cpumask_t x2apic_target_cpus(void)
441 +static const cpumask_t *x2apic_target_cpus(void)
442 {
443 - return cpumask_of_cpu(0);
444 + return &cpumask_of_cpu(0);
445 }
446
447 -static cpumask_t x2apic_vector_allocation_domain(int cpu)
448 +static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask)
449 {
450 - cpumask_t domain = CPU_MASK_NONE;
451 - cpu_set(cpu, domain);
452 - return domain;
453 + cpus_clear(*retmask);
454 + cpu_set(cpu, *retmask);
455 }
456
457 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
458 @@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsig
459 x2apic_icr_write(cfg, apicid);
460 }
461
462 -static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
463 +static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector)
464 {
465 unsigned long flags;
466 unsigned long query_cpu;
467
468 local_irq_save(flags);
469 - for_each_cpu_mask(query_cpu, mask) {
470 + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map) {
471 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
472 vector, APIC_DEST_PHYSICAL);
473 }
474 local_irq_restore(flags);
475 }
476
477 -static void x2apic_send_IPI_allbutself(int vector)
478 +static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
479 {
480 - cpumask_t mask = cpu_online_map;
481 + unsigned long flags;
482 + unsigned long query_cpu;
483 + unsigned long this_cpu = smp_processor_id();
484 +
485 + local_irq_save(flags);
486 + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map) {
487 + if (query_cpu != this_cpu)
488 + __x2apic_send_IPI_dest(
489 + per_cpu(x86_cpu_to_apicid, query_cpu),
490 + vector, APIC_DEST_PHYSICAL);
491 + }
492 + local_irq_restore(flags);
493 +}
494
495 - cpu_clear(smp_processor_id(), mask);
496 +static void x2apic_send_IPI_allbutself(int vector)
497 +{
498 + unsigned long flags;
499 + unsigned long query_cpu;
500 + unsigned long this_cpu = smp_processor_id();
501
502 - if (!cpus_empty(mask))
503 - x2apic_send_IPI_mask(mask, vector);
504 + local_irq_save(flags);
505 + for_each_online_cpu(query_cpu)
506 + if (query_cpu != this_cpu)
507 + __x2apic_send_IPI_dest(
508 + per_cpu(x86_cpu_to_apicid, query_cpu),
509 + vector, APIC_DEST_PHYSICAL);
510 + local_irq_restore(flags);
511 }
512
513 static void x2apic_send_IPI_all(int vector)
514 {
515 - x2apic_send_IPI_mask(cpu_online_map, vector);
516 + x2apic_send_IPI_mask(&cpu_online_map, vector);
517 }
518
519 static int x2apic_apic_id_registered(void)
520 @@ -89,7 +109,7 @@ static int x2apic_apic_id_registered(voi
521 return 1;
522 }
523
524 -static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
525 +static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
526 {
527 int cpu;
528
529 @@ -97,8 +117,8 @@ static unsigned int x2apic_cpu_mask_to_a
530 * We're using fixed IRQ delivery, can only return one phys APIC ID.
531 * May as well be the first.
532 */
533 - cpu = first_cpu(cpumask);
534 - if ((unsigned)cpu < NR_CPUS)
535 + cpu = first_cpu(*cpumask);
536 + if ((unsigned)cpu < nr_cpu_ids)
537 return per_cpu(x86_cpu_to_apicid, cpu);
538 else
539 return BAD_APICID;
540 @@ -147,6 +167,7 @@ struct genapic apic_x2apic_phys = {
541 .send_IPI_all = x2apic_send_IPI_all,
542 .send_IPI_allbutself = x2apic_send_IPI_allbutself,
543 .send_IPI_mask = x2apic_send_IPI_mask,
544 + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
545 .send_IPI_self = x2apic_send_IPI_self,
546 .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
547 .phys_pkg_id = phys_pkg_id,
548 --- a/arch/x86/kernel/genx2apic_uv_x.c
549 +++ b/arch/x86/kernel/genx2apic_uv_x.c
550 @@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
551
552 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
553
554 -static cpumask_t uv_target_cpus(void)
555 +static const cpumask_t *uv_target_cpus(void)
556 {
557 - return cpumask_of_cpu(0);
558 + return &cpumask_of_cpu(0);
559 }
560
561 -static cpumask_t uv_vector_allocation_domain(int cpu)
562 +static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask)
563 {
564 - cpumask_t domain = CPU_MASK_NONE;
565 - cpu_set(cpu, domain);
566 - return domain;
567 + cpus_clear(*retmask);
568 + cpu_set(cpu, *retmask);
569 }
570
571 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
572 @@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int
573 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
574 }
575
576 -static void uv_send_IPI_mask(cpumask_t mask, int vector)
577 +static void uv_send_IPI_mask(const cpumask_t *mask, int vector)
578 {
579 unsigned int cpu;
580
581 - for_each_possible_cpu(cpu)
582 - if (cpu_isset(cpu, mask))
583 + for_each_cpu_mask_and(cpu, *mask, cpu_online_map)
584 + uv_send_IPI_one(cpu, vector);
585 +}
586 +
587 +static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
588 +{
589 + unsigned int cpu;
590 + unsigned int this_cpu = smp_processor_id();
591 +
592 + for_each_cpu_mask_and(cpu, *mask, cpu_online_map)
593 + if (cpu != this_cpu)
594 uv_send_IPI_one(cpu, vector);
595 }
596
597 static void uv_send_IPI_allbutself(int vector)
598 {
599 - cpumask_t mask = cpu_online_map;
600 -
601 - cpu_clear(smp_processor_id(), mask);
602 + unsigned int cpu;
603 + unsigned int this_cpu = smp_processor_id();
604
605 - if (!cpus_empty(mask))
606 - uv_send_IPI_mask(mask, vector);
607 + for_each_online_cpu(cpu)
608 + if (cpu != this_cpu)
609 + uv_send_IPI_one(cpu, vector);
610 }
611
612 static void uv_send_IPI_all(int vector)
613 {
614 - uv_send_IPI_mask(cpu_online_map, vector);
615 + uv_send_IPI_mask(&cpu_online_map, vector);
616 }
617
618 static int uv_apic_id_registered(void)
619 @@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void)
620 {
621 }
622
623 -static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
624 +static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
625 {
626 int cpu;
627
628 @@ -164,7 +172,7 @@ static unsigned int uv_cpu_mask_to_apici
629 * We're using fixed IRQ delivery, can only return one phys APIC ID.
630 * May as well be the first.
631 */
632 - cpu = first_cpu(cpumask);
633 + cpu = first_cpu(*cpumask);
634 if ((unsigned)cpu < nr_cpu_ids)
635 return per_cpu(x86_cpu_to_apicid, cpu);
636 else
637 @@ -219,6 +227,7 @@ struct genapic apic_x2apic_uv_x = {
638 .init_apic_ldr = uv_init_apic_ldr,
639 .send_IPI_all = uv_send_IPI_all,
640 .send_IPI_allbutself = uv_send_IPI_allbutself,
641 + .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
642 .send_IPI_mask = uv_send_IPI_mask,
643 /* ZZZ.send_IPI_self = uv_send_IPI_self, */
644 .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
645 --- a/arch/x86/kernel/io_apic_32.c
646 +++ b/arch/x86/kernel/io_apic_32.c
647 @@ -344,11 +344,11 @@ static void set_ioapic_affinity_irq(unsi
648
649 cpus_and(tmp, cpumask, cpu_online_map);
650 if (cpus_empty(tmp))
651 - tmp = TARGET_CPUS;
652 + tmp = *TARGET_CPUS;
653
654 cpus_and(cpumask, tmp, CPU_MASK_ALL);
655
656 - apicid_value = cpu_mask_to_apicid(cpumask);
657 + apicid_value = cpu_mask_to_apicid(&cpumask);
658 /* Prepare to do the io_apic_write */
659 apicid_value = apicid_value << 24;
660 spin_lock_irqsave(&ioapic_lock, flags);
661 @@ -926,7 +926,7 @@ void __init setup_ioapic_dest(void)
662 if (irq_entry == -1)
663 continue;
664 irq = pin_2_irq(irq_entry, ioapic, pin);
665 - set_ioapic_affinity_irq(irq, TARGET_CPUS);
666 + set_ioapic_affinity_irq(irq, *TARGET_CPUS);
667 }
668
669 }
670 @@ -2522,13 +2522,13 @@ static void set_msi_irq_affinity(unsigne
671
672 cpus_and(tmp, mask, cpu_online_map);
673 if (cpus_empty(tmp))
674 - tmp = TARGET_CPUS;
675 + tmp = *TARGET_CPUS;
676
677 vector = assign_irq_vector(irq);
678 if (vector < 0)
679 return;
680
681 - dest = cpu_mask_to_apicid(mask);
682 + dest = cpu_mask_to_apicid(&mask);
683
684 read_msi_msg(irq, &msg);
685
686 @@ -2615,11 +2615,11 @@ static void set_ht_irq_affinity(unsigned
687
688 cpus_and(tmp, mask, cpu_online_map);
689 if (cpus_empty(tmp))
690 - tmp = TARGET_CPUS;
691 + tmp = *TARGET_CPUS;
692
693 cpus_and(mask, tmp, CPU_MASK_ALL);
694
695 - dest = cpu_mask_to_apicid(mask);
696 + dest = cpu_mask_to_apicid(&mask);
697
698 target_ht_irq(irq, dest);
699 irq_desc[irq].affinity = mask;
700 @@ -2649,7 +2649,7 @@ int arch_setup_ht_irq(unsigned int irq,
701
702 cpus_clear(tmp);
703 cpu_set(vector >> 8, tmp);
704 - dest = cpu_mask_to_apicid(tmp);
705 + dest = cpu_mask_to_apicid(&tmp);
706
707 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
708
709 --- a/arch/x86/kernel/io_apic_64.c
710 +++ b/arch/x86/kernel/io_apic_64.c
711 @@ -83,7 +83,7 @@ static struct irq_cfg irq_cfg[NR_IRQS] _
712 [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
713 };
714
715 -static int assign_irq_vector(int irq, cpumask_t mask);
716 +static int assign_irq_vector(int irq, const cpumask_t *mask);
717
718 int first_system_vector = 0xfe;
719
720 @@ -335,11 +335,11 @@ static void set_ioapic_affinity_irq(unsi
721 if (cpus_empty(tmp))
722 return;
723
724 - if (assign_irq_vector(irq, mask))
725 + if (assign_irq_vector(irq, &mask))
726 return;
727
728 cpus_and(tmp, cfg->domain, mask);
729 - dest = cpu_mask_to_apicid(tmp);
730 + dest = cpu_mask_to_apicid(&tmp);
731
732 /*
733 * Only the high 8 bits are valid.
734 @@ -798,7 +798,7 @@ void unlock_vector_lock(void)
735 spin_unlock(&vector_lock);
736 }
737
738 -static int __assign_irq_vector(int irq, cpumask_t mask)
739 +static int __assign_irq_vector(int irq, const cpumask_t *mask)
740 {
741 /*
742 * NOTE! The local APIC isn't very good at handling
743 @@ -815,31 +815,28 @@ static int __assign_irq_vector(int irq,
744 unsigned int old_vector;
745 int cpu;
746 struct irq_cfg *cfg;
747 + cpumask_t tmp_mask;
748
749 BUG_ON((unsigned)irq >= NR_IRQS);
750 cfg = &irq_cfg[irq];
751
752 - /* Only try and allocate irqs on cpus that are present */
753 - cpus_and(mask, mask, cpu_online_map);
754 -
755 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
756 return -EBUSY;
757
758 old_vector = cfg->vector;
759 if (old_vector) {
760 - cpumask_t tmp;
761 - cpus_and(tmp, cfg->domain, mask);
762 - if (!cpus_empty(tmp))
763 + cpus_and(tmp_mask, *mask, cpu_online_map);
764 + cpus_and(tmp_mask, cfg->domain, tmp_mask);
765 + if (!cpus_empty(tmp_mask))
766 return 0;
767 }
768
769 - for_each_cpu_mask_nr(cpu, mask) {
770 - cpumask_t domain, new_mask;
771 + /* Only try and allocate irqs on cpus that are present */
772 + for_each_cpu_mask_and(cpu, *mask, cpu_online_map) {
773 int new_cpu;
774 int vector, offset;
775
776 - domain = vector_allocation_domain(cpu);
777 - cpus_and(new_mask, domain, cpu_online_map);
778 + vector_allocation_domain(cpu, &tmp_mask);
779
780 vector = current_vector;
781 offset = current_offset;
782 @@ -854,7 +851,7 @@ next:
783 continue;
784 if (vector == IA32_SYSCALL_VECTOR)
785 goto next;
786 - for_each_cpu_mask_nr(new_cpu, new_mask)
787 + for_each_cpu_mask_and(new_cpu, tmp_mask, cpu_online_map)
788 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
789 goto next;
790 /* Found one! */
791 @@ -864,16 +861,16 @@ next:
792 cfg->move_in_progress = 1;
793 cfg->old_domain = cfg->domain;
794 }
795 - for_each_cpu_mask_nr(new_cpu, new_mask)
796 + for_each_cpu_mask_and(new_cpu, tmp_mask, cpu_online_map)
797 per_cpu(vector_irq, new_cpu)[vector] = irq;
798 cfg->vector = vector;
799 - cfg->domain = domain;
800 + cfg->domain = tmp_mask;
801 return 0;
802 }
803 return -ENOSPC;
804 }
805
806 -static int assign_irq_vector(int irq, cpumask_t mask)
807 +static int assign_irq_vector(int irq, const cpumask_t *mask)
808 {
809 int err;
810 unsigned long flags;
811 @@ -1031,8 +1028,8 @@ static void setup_IO_APIC_irq(int apic,
812 if (!IO_APIC_IRQ(irq))
813 return;
814
815 - mask = TARGET_CPUS;
816 - if (assign_irq_vector(irq, mask))
817 + mask = *TARGET_CPUS;
818 + if (assign_irq_vector(irq, &mask))
819 return;
820
821 cpus_and(mask, cfg->domain, mask);
822 @@ -1045,7 +1042,7 @@ static void setup_IO_APIC_irq(int apic,
823
824
825 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
826 - cpu_mask_to_apicid(mask), trigger, polarity,
827 + cpu_mask_to_apicid(&mask), trigger, polarity,
828 cfg->vector)) {
829 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
830 mp_ioapics[apic].mp_apicid, pin);
831 @@ -1543,7 +1540,7 @@ static int ioapic_retrigger_irq(unsigned
832 unsigned long flags;
833
834 spin_lock_irqsave(&vector_lock, flags);
835 - send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
836 + send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
837 spin_unlock_irqrestore(&vector_lock, flags);
838
839 return 1;
840 @@ -1588,7 +1585,7 @@ static void migrate_ioapic_irq(int irq,
841 {
842 struct irq_cfg *cfg = irq_cfg + irq;
843 struct irq_desc *desc = irq_desc + irq;
844 - cpumask_t tmp, cleanup_mask;
845 + cpumask_t tmp;
846 struct irte irte;
847 int modify_ioapic_rte = desc->status & IRQ_LEVEL;
848 unsigned int dest;
849 @@ -1601,11 +1598,11 @@ static void migrate_ioapic_irq(int irq,
850 if (get_irte(irq, &irte))
851 return;
852
853 - if (assign_irq_vector(irq, mask))
854 + if (assign_irq_vector(irq, &mask))
855 return;
856
857 cpus_and(tmp, cfg->domain, mask);
858 - dest = cpu_mask_to_apicid(tmp);
859 + dest = cpu_mask_to_apicid(&tmp);
860
861 if (modify_ioapic_rte) {
862 spin_lock_irqsave(&ioapic_lock, flags);
863 @@ -1622,9 +1619,9 @@ static void migrate_ioapic_irq(int irq,
864 modify_irte(irq, &irte);
865
866 if (cfg->move_in_progress) {
867 - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
868 - cfg->move_cleanup_count = cpus_weight(cleanup_mask);
869 - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
870 + cpus_and(tmp, cfg->old_domain, cpu_online_map);
871 + cfg->move_cleanup_count = cpus_weight(tmp);
872 + send_IPI_mask(&tmp, IRQ_MOVE_CLEANUP_VECTOR);
873 cfg->move_in_progress = 0;
874 }
875
876 @@ -1749,7 +1746,7 @@ static void irq_complete_move(unsigned i
877
878 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
879 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
880 - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
881 + send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
882 cfg->move_in_progress = 0;
883 }
884 }
885 @@ -2329,13 +2326,13 @@ static int msi_compose_msg(struct pci_de
886 unsigned dest;
887 cpumask_t tmp;
888
889 - tmp = TARGET_CPUS;
890 - err = assign_irq_vector(irq, tmp);
891 + tmp = *TARGET_CPUS;
892 + err = assign_irq_vector(irq, &tmp);
893 if (err)
894 return err;
895
896 cpus_and(tmp, cfg->domain, tmp);
897 - dest = cpu_mask_to_apicid(tmp);
898 + dest = cpu_mask_to_apicid(&tmp);
899
900 #ifdef CONFIG_INTR_REMAP
901 if (irq_remapped(irq)) {
902 @@ -2400,11 +2397,11 @@ static void set_msi_irq_affinity(unsigne
903 if (cpus_empty(tmp))
904 return;
905
906 - if (assign_irq_vector(irq, mask))
907 + if (assign_irq_vector(irq, &mask))
908 return;
909
910 cpus_and(tmp, cfg->domain, mask);
911 - dest = cpu_mask_to_apicid(tmp);
912 + dest = cpu_mask_to_apicid(&tmp);
913
914 read_msi_msg(irq, &msg);
915
916 @@ -2426,7 +2423,7 @@ static void ir_set_msi_irq_affinity(unsi
917 {
918 struct irq_cfg *cfg = irq_cfg + irq;
919 unsigned int dest;
920 - cpumask_t tmp, cleanup_mask;
921 + cpumask_t tmp;
922 struct irte irte;
923
924 cpus_and(tmp, mask, cpu_online_map);
925 @@ -2436,11 +2433,11 @@ static void ir_set_msi_irq_affinity(unsi
926 if (get_irte(irq, &irte))
927 return;
928
929 - if (assign_irq_vector(irq, mask))
930 + if (assign_irq_vector(irq, &mask))
931 return;
932
933 cpus_and(tmp, cfg->domain, mask);
934 - dest = cpu_mask_to_apicid(tmp);
935 + dest = cpu_mask_to_apicid(&tmp);
936
937 irte.vector = cfg->vector;
938 irte.dest_id = IRTE_DEST(dest);
939 @@ -2456,9 +2453,9 @@ static void ir_set_msi_irq_affinity(unsi
940 * vector allocation.
941 */
942 if (cfg->move_in_progress) {
943 - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
944 - cfg->move_cleanup_count = cpus_weight(cleanup_mask);
945 - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
946 + cpus_and(tmp, cfg->old_domain, cpu_online_map);
947 + cfg->move_cleanup_count = cpus_weight(tmp);
948 + send_IPI_mask(&tmp, IRQ_MOVE_CLEANUP_VECTOR);
949 cfg->move_in_progress = 0;
950 }
951
952 @@ -2653,11 +2650,11 @@ static void dmar_msi_set_affinity(unsign
953 if (cpus_empty(tmp))
954 return;
955
956 - if (assign_irq_vector(irq, mask))
957 + if (assign_irq_vector(irq, &mask))
958 return;
959
960 cpus_and(tmp, cfg->domain, mask);
961 - dest = cpu_mask_to_apicid(tmp);
962 + dest = cpu_mask_to_apicid(&tmp);
963
964 dmar_msi_read(irq, &msg);
965
966 @@ -2729,11 +2726,11 @@ static void set_ht_irq_affinity(unsigned
967 if (cpus_empty(tmp))
968 return;
969
970 - if (assign_irq_vector(irq, mask))
971 + if (assign_irq_vector(irq, &mask))
972 return;
973
974 cpus_and(tmp, cfg->domain, mask);
975 - dest = cpu_mask_to_apicid(tmp);
976 + dest = cpu_mask_to_apicid(&tmp);
977
978 target_ht_irq(irq, dest, cfg->vector);
979 irq_desc[irq].affinity = mask;
980 @@ -2757,14 +2754,14 @@ int arch_setup_ht_irq(unsigned int irq,
981 int err;
982 cpumask_t tmp;
983
984 - tmp = TARGET_CPUS;
985 - err = assign_irq_vector(irq, tmp);
986 + tmp = *TARGET_CPUS;
987 + err = assign_irq_vector(irq, &tmp);
988 if (!err) {
989 struct ht_irq_msg msg;
990 unsigned dest;
991
992 cpus_and(tmp, cfg->domain, tmp);
993 - dest = cpu_mask_to_apicid(tmp);
994 + dest = cpu_mask_to_apicid(&tmp);
995
996 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
997
998 @@ -2882,10 +2879,10 @@ void __init setup_ioapic_dest(void)
999 irq_polarity(irq_entry));
1000 #ifdef CONFIG_INTR_REMAP
1001 else if (intr_remapping_enabled)
1002 - set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
1003 + set_ir_ioapic_affinity_irq(irq, *TARGET_CPUS);
1004 #endif
1005 else
1006 - set_ioapic_affinity_irq(irq, TARGET_CPUS);
1007 + set_ioapic_affinity_irq(irq, *TARGET_CPUS);
1008 }
1009
1010 }
1011 --- a/arch/x86/kernel/ipi.c
1012 +++ b/arch/x86/kernel/ipi.c
1013 @@ -114,9 +114,9 @@ static inline void __send_IPI_dest_field
1014 /*
1015 * This is only used on smaller machines.
1016 */
1017 -void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
1018 +void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector)
1019 {
1020 - unsigned long mask = cpus_addr(cpumask)[0];
1021 + unsigned long mask = cpus_addr(*cpumask)[0];
1022 unsigned long flags;
1023
1024 local_irq_save(flags);
1025 @@ -125,7 +125,7 @@ void send_IPI_mask_bitmask(cpumask_t cpu
1026 local_irq_restore(flags);
1027 }
1028
1029 -void send_IPI_mask_sequence(cpumask_t mask, int vector)
1030 +void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
1031 {
1032 unsigned long flags;
1033 unsigned int query_cpu;
1034 @@ -137,12 +137,24 @@ void send_IPI_mask_sequence(cpumask_t ma
1035 */
1036
1037 local_irq_save(flags);
1038 - for_each_possible_cpu(query_cpu) {
1039 - if (cpu_isset(query_cpu, mask)) {
1040 + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map)
1041 + __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
1042 + local_irq_restore(flags);
1043 +}
1044 +
1045 +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
1046 +{
1047 + unsigned long flags;
1048 + unsigned int query_cpu;
1049 + unsigned int this_cpu = smp_processor_id();
1050 +
1051 + /* See Hack comment above */
1052 +
1053 + local_irq_save(flags);
1054 + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map)
1055 + if (query_cpu != this_cpu)
1056 __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
1057 vector);
1058 - }
1059 - }
1060 local_irq_restore(flags);
1061 }
1062
1063 --- a/arch/x86/kernel/smp.c
1064 +++ b/arch/x86/kernel/smp.c
1065 @@ -118,26 +118,17 @@ static void native_smp_send_reschedule(i
1066 WARN_ON(1);
1067 return;
1068 }
1069 - send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
1070 + send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
1071 }
1072
1073 void native_send_call_func_single_ipi(int cpu)
1074 {
1075 - send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
1076 + send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
1077 }
1078
1079 void native_send_call_func_ipi(const cpumask_t *mask)
1080 {
1081 - cpumask_t allbutself;
1082 -
1083 - allbutself = cpu_online_map;
1084 - cpu_clear(smp_processor_id(), allbutself);
1085 -
1086 - if (cpus_equal(*mask, allbutself) &&
1087 - cpus_equal(cpu_online_map, cpu_callout_map))
1088 - send_IPI_allbutself(CALL_FUNCTION_VECTOR);
1089 - else
1090 - send_IPI_mask(*mask, CALL_FUNCTION_VECTOR);
1091 + send_IPI_mask_allbutself(mask, CALL_FUNCTION_VECTOR);
1092 }
1093
1094 static void stop_this_cpu(void *dummy)
1095 --- a/arch/x86/kernel/tlb_32.c
1096 +++ b/arch/x86/kernel/tlb_32.c
1097 @@ -158,7 +158,7 @@ void native_flush_tlb_others(const cpuma
1098 * We have to send the IPI only to
1099 * CPUs affected.
1100 */
1101 - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
1102 + send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
1103
1104 while (!cpus_empty(flush_cpumask))
1105 /* nothing. lockup detection does not belong here */
1106 --- a/arch/x86/kernel/tlb_64.c
1107 +++ b/arch/x86/kernel/tlb_64.c
1108 @@ -186,7 +186,7 @@ void native_flush_tlb_others(const cpuma
1109 * We have to send the IPI only to
1110 * CPUs affected.
1111 */
1112 - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
1113 + send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
1114
1115 while (!cpus_empty(f->flush_cpumask))
1116 cpu_relax();
1117 --- a/arch/x86/xen/smp.c
1118 +++ b/arch/x86/xen/smp.c
1119 @@ -157,7 +157,7 @@ static void __init xen_fill_possible_map
1120 {
1121 int i, rc;
1122
1123 - for (i = 0; i < NR_CPUS; i++) {
1124 + for (i = 0; i < nr_cpu_ids; i++) {
1125 rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
1126 if (rc >= 0) {
1127 num_processors++;
1128 @@ -195,7 +195,7 @@ static void __init xen_smp_prepare_cpus(
1129
1130 /* Restrict the possible_map according to max_cpus. */
1131 while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
1132 - for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
1133 + for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
1134 continue;
1135 cpu_clear(cpu, cpu_possible_map);
1136 }
1137 @@ -361,13 +361,11 @@ static void xen_smp_send_reschedule(int
1138 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
1139 }
1140
1141 -static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
1142 +static void xen_send_IPI_mask(const cpumask_t *mask, enum ipi_vector vector)
1143 {
1144 unsigned cpu;
1145
1146 - cpus_and(mask, mask, cpu_online_map);
1147 -
1148 - for_each_cpu_mask_nr(cpu, mask)
1149 + for_each_cpu_mask_and(cpu, *mask, cpu_online_map)
1150 xen_send_IPI_one(cpu, vector);
1151 }
1152
1153 @@ -375,7 +373,7 @@ static void xen_smp_send_call_function_i
1154 {
1155 int cpu;
1156
1157 - xen_send_IPI_mask(*mask, XEN_CALL_FUNCTION_VECTOR);
1158 + xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
1159
1160 /* Make sure other vcpus get a chance to run if they need to. */
1161 for_each_cpu_mask_nr(cpu, *mask) {
1162 @@ -388,7 +386,8 @@ static void xen_smp_send_call_function_i
1163
1164 static void xen_smp_send_call_function_single_ipi(int cpu)
1165 {
1166 - xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
1167 + xen_send_IPI_mask(&cpumask_of_cpu(cpu),
1168 + XEN_CALL_FUNCTION_SINGLE_VECTOR);
1169 }
1170
1171 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
1172 --- a/include/asm-x86/genapic_32.h
1173 +++ b/include/asm-x86/genapic_32.h
1174 @@ -23,7 +23,7 @@ struct genapic {
1175 int (*probe)(void);
1176
1177 int (*apic_id_registered)(void);
1178 - cpumask_t (*target_cpus)(void);
1179 + const cpumask_t *(*target_cpus)(void);
1180 int int_delivery_mode;
1181 int int_dest_mode;
1182 int ESR_DISABLE;
1183 @@ -56,11 +56,12 @@ struct genapic {
1184
1185 unsigned (*get_apic_id)(unsigned long x);
1186 unsigned long apic_id_mask;
1187 - unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
1188 + unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
1189
1190 #ifdef CONFIG_SMP
1191 /* ipi */
1192 - void (*send_IPI_mask)(cpumask_t mask, int vector);
1193 + void (*send_IPI_mask)(const cpumask_t *mask, int vector);
1194 + void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
1195 void (*send_IPI_allbutself)(int vector);
1196 void (*send_IPI_all)(int vector);
1197 #endif
1198 @@ -106,6 +107,7 @@ struct genapic {
1199 APICFUNC(cpu_mask_to_apicid) \
1200 APICFUNC(acpi_madt_oem_check) \
1201 IPIFUNC(send_IPI_mask) \
1202 + IPIFUNC(send_IPI_mask_allbutself) \
1203 IPIFUNC(send_IPI_allbutself) \
1204 IPIFUNC(send_IPI_all) \
1205 APICFUNC(enable_apic_mode) \
1206 --- a/include/asm-x86/genapic_64.h
1207 +++ b/include/asm-x86/genapic_64.h
1208 @@ -1,6 +1,8 @@
1209 #ifndef _ASM_GENAPIC_H
1210 #define _ASM_GENAPIC_H 1
1211
1212 +#include <linux/cpumask.h>
1213 +
1214 /*
1215 * Copyright 2004 James Cleverdon, IBM.
1216 * Subject to the GNU Public License, v.2
1217 @@ -18,16 +20,17 @@ struct genapic {
1218 u32 int_delivery_mode;
1219 u32 int_dest_mode;
1220 int (*apic_id_registered)(void);
1221 - cpumask_t (*target_cpus)(void);
1222 - cpumask_t (*vector_allocation_domain)(int cpu);
1223 + const cpumask_t *(*target_cpus)(void);
1224 + void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
1225 void (*init_apic_ldr)(void);
1226 /* ipi */
1227 - void (*send_IPI_mask)(cpumask_t mask, int vector);
1228 + void (*send_IPI_mask)(const cpumask_t *mask, int vector);
1229 + void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector);
1230 void (*send_IPI_allbutself)(int vector);
1231 void (*send_IPI_all)(int vector);
1232 void (*send_IPI_self)(int vector);
1233 /* */
1234 - unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
1235 + unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
1236 unsigned int (*phys_pkg_id)(int index_msb);
1237 unsigned int (*get_apic_id)(unsigned long x);
1238 unsigned long (*set_apic_id)(unsigned int id);
1239 --- a/include/asm-x86/ipi.h
1240 +++ b/include/asm-x86/ipi.h
1241 @@ -117,7 +117,7 @@ static inline void __send_IPI_dest_field
1242 native_apic_mem_write(APIC_ICR, cfg);
1243 }
1244
1245 -static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
1246 +static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector)
1247 {
1248 unsigned long flags;
1249 unsigned long query_cpu;
1250 @@ -128,10 +128,26 @@ static inline void send_IPI_mask_sequenc
1251 * - mbligh
1252 */
1253 local_irq_save(flags);
1254 - for_each_cpu_mask_nr(query_cpu, mask) {
1255 + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map)
1256 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
1257 vector, APIC_DEST_PHYSICAL);
1258 - }
1259 + local_irq_restore(flags);
1260 +}
1261 +
1262 +static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
1263 +{
1264 + unsigned long flags;
1265 + unsigned int query_cpu;
1266 + unsigned int this_cpu = smp_processor_id();
1267 +
1268 + /* See Hack comment above */
1269 +
1270 + local_irq_save(flags);
1271 + for_each_cpu_mask_and(query_cpu, *mask, cpu_online_map)
1272 + if (query_cpu != this_cpu)
1273 + __send_IPI_dest_field(
1274 + per_cpu(x86_cpu_to_apicid, query_cpu),
1275 + vector, APIC_DEST_PHYSICAL);
1276 local_irq_restore(flags);
1277 }
1278
1279 --- a/include/asm-x86/mach-bigsmp/mach_apic.h
1280 +++ b/include/asm-x86/mach-bigsmp/mach_apic.h
1281 @@ -10,7 +10,7 @@ static inline int apic_id_registered(voi
1282 }
1283
1284 /* Round robin the irqs amoung the online cpus */
1285 -static inline cpumask_t target_cpus(void)
1286 +static inline const cpumask_t *target_cpus(void)
1287 {
1288 static unsigned long cpu = NR_CPUS;
1289 do {
1290 @@ -19,7 +19,7 @@ static inline cpumask_t target_cpus(void
1291 else
1292 cpu = next_cpu(cpu, cpu_online_map);
1293 } while (cpu >= NR_CPUS);
1294 - return cpumask_of_cpu(cpu);
1295 + return &cpumask_of_cpu(cpu);
1296 }
1297
1298 #undef APIC_DEST_LOGICAL
1299 @@ -126,12 +126,12 @@ static inline int check_phys_apicid_pres
1300 }
1301
1302 /* As we are using single CPU as destination, pick only one CPU here */
1303 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
1304 +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
1305 {
1306 int cpu;
1307 int apicid;
1308
1309 - cpu = first_cpu(cpumask);
1310 + cpu = first_cpu(*cpumask);
1311 apicid = cpu_to_logical_apicid(cpu);
1312 return apicid;
1313 }
1314 --- a/include/asm-x86/mach-bigsmp/mach_ipi.h
1315 +++ b/include/asm-x86/mach-bigsmp/mach_ipi.h
1316 @@ -1,25 +1,30 @@
1317 #ifndef __ASM_MACH_IPI_H
1318 #define __ASM_MACH_IPI_H
1319
1320 -void send_IPI_mask_sequence(cpumask_t mask, int vector);
1321 +void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
1322
1323 -static inline void send_IPI_mask(cpumask_t mask, int vector)
1324 +static inline void send_IPI_mask(const cpumask_t *mask, int vector)
1325 {
1326 send_IPI_mask_sequence(mask, vector);
1327 }
1328
1329 -static inline void send_IPI_allbutself(int vector)
1330 +static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
1331 {
1332 - cpumask_t mask = cpu_online_map;
1333 - cpu_clear(smp_processor_id(), mask);
1334 + cpumask_t allbutself = *mask;
1335 + cpu_clear(smp_processor_id(), allbutself);
1336 +
1337 + if (!cpus_empty(allbutself))
1338 + send_IPI_mask_sequence(&allbutself, vector);
1339 +}
1340
1341 - if (!cpus_empty(mask))
1342 - send_IPI_mask(mask, vector);
1343 +static inline void send_IPI_allbutself(int vector)
1344 +{
1345 + send_IPI_mask_allbutself(&cpu_online_map, vector);
1346 }
1347
1348 static inline void send_IPI_all(int vector)
1349 {
1350 - send_IPI_mask(cpu_online_map, vector);
1351 + send_IPI_mask(&cpu_online_map, vector);
1352 }
1353
1354 #endif /* __ASM_MACH_IPI_H */
1355 --- a/include/asm-x86/mach-default/mach_apic.h
1356 +++ b/include/asm-x86/mach-default/mach_apic.h
1357 @@ -8,12 +8,12 @@
1358
1359 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
1360
1361 -static inline cpumask_t target_cpus(void)
1362 +static inline const cpumask_t *target_cpus(void)
1363 {
1364 #ifdef CONFIG_SMP
1365 - return cpu_online_map;
1366 + return &cpu_online_map;
1367 #else
1368 - return cpumask_of_cpu(0);
1369 + return &cpumask_of_cpu(0);
1370 #endif
1371 }
1372
1373 @@ -59,9 +59,9 @@ static inline int apic_id_registered(voi
1374 return physid_isset(read_apic_id(), phys_cpu_present_map);
1375 }
1376
1377 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
1378 +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
1379 {
1380 - return cpus_addr(cpumask)[0];
1381 + return cpus_addr(*cpumask)[0];
1382 }
1383
1384 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
1385 @@ -115,7 +115,7 @@ static inline int cpu_to_logical_apicid(
1386
1387 static inline int cpu_present_to_apicid(int mps_cpu)
1388 {
1389 - if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
1390 + if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
1391 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
1392 else
1393 return BAD_APICID;
1394 --- a/include/asm-x86/mach-default/mach_ipi.h
1395 +++ b/include/asm-x86/mach-default/mach_ipi.h
1396 @@ -4,7 +4,8 @@
1397 /* Avoid include hell */
1398 #define NMI_VECTOR 0x02
1399
1400 -void send_IPI_mask_bitmask(cpumask_t mask, int vector);
1401 +void send_IPI_mask_bitmask(const cpumask_t *mask, int vector);
1402 +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
1403 void __send_IPI_shortcut(unsigned int shortcut, int vector);
1404
1405 extern int no_broadcast;
1406 @@ -12,28 +13,27 @@ extern int no_broadcast;
1407 #ifdef CONFIG_X86_64
1408 #include <asm/genapic.h>
1409 #define send_IPI_mask (genapic->send_IPI_mask)
1410 +#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
1411 #else
1412 -static inline void send_IPI_mask(cpumask_t mask, int vector)
1413 +static inline void send_IPI_mask(const cpumask_t *mask, int vector)
1414 {
1415 send_IPI_mask_bitmask(mask, vector);
1416 }
1417 +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
1418 #endif
1419
1420 static inline void __local_send_IPI_allbutself(int vector)
1421 {
1422 - if (no_broadcast || vector == NMI_VECTOR) {
1423 - cpumask_t mask = cpu_online_map;
1424 -
1425 - cpu_clear(smp_processor_id(), mask);
1426 - send_IPI_mask(mask, vector);
1427 - } else
1428 + if (no_broadcast || vector == NMI_VECTOR)
1429 + send_IPI_mask_allbutself(&cpu_online_map, vector);
1430 + else
1431 __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
1432 }
1433
1434 static inline void __local_send_IPI_all(int vector)
1435 {
1436 if (no_broadcast || vector == NMI_VECTOR)
1437 - send_IPI_mask(cpu_online_map, vector);
1438 + send_IPI_mask(&cpu_online_map, vector);
1439 else
1440 __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
1441 }
1442 --- a/include/asm-x86/mach-es7000/mach_apic.h
1443 +++ b/include/asm-x86/mach-es7000/mach_apic.h
1444 @@ -9,12 +9,12 @@ static inline int apic_id_registered(voi
1445 return (1);
1446 }
1447
1448 -static inline cpumask_t target_cpus(void)
1449 +static inline cpumask_t *target_cpus(void)
1450 {
1451 #if defined CONFIG_ES7000_CLUSTERED_APIC
1452 - return CPU_MASK_ALL;
1453 + return &CPU_MASK_ALL;
1454 #else
1455 - return cpumask_of_cpu(smp_processor_id());
1456 + return &cpumask_of_cpu(smp_processor_id());
1457 #endif
1458 }
1459 #define TARGET_CPUS (target_cpus())
1460 @@ -145,7 +145,7 @@ static inline int check_phys_apicid_pres
1461 return (1);
1462 }
1463
1464 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
1465 +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
1466 {
1467 int num_bits_set;
1468 int cpus_found = 0;
1469 --- a/include/asm-x86/mach-es7000/mach_ipi.h
1470 +++ b/include/asm-x86/mach-es7000/mach_ipi.h
1471 @@ -1,24 +1,30 @@
1472 #ifndef __ASM_MACH_IPI_H
1473 #define __ASM_MACH_IPI_H
1474
1475 -void send_IPI_mask_sequence(cpumask_t mask, int vector);
1476 +void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
1477
1478 -static inline void send_IPI_mask(cpumask_t mask, int vector)
1479 +static inline void send_IPI_mask(const cpumask_t *mask, int vector)
1480 {
1481 send_IPI_mask_sequence(mask, vector);
1482 }
1483
1484 +static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
1485 +{
1486 + cpumask_t allbutself = *mask;
1487 + cpu_clear(smp_processor_id(), allbutself);
1488 +
1489 + if (!cpus_empty(allbutself))
1490 + send_IPI_mask_sequence(&allbutself, vector);
1491 +}
1492 +
1493 static inline void send_IPI_allbutself(int vector)
1494 {
1495 - cpumask_t mask = cpu_online_map;
1496 - cpu_clear(smp_processor_id(), mask);
1497 - if (!cpus_empty(mask))
1498 - send_IPI_mask(mask, vector);
1499 + send_IPI_mask_allbutself(&cpu_online_map, vector);
1500 }
1501
1502 static inline void send_IPI_all(int vector)
1503 {
1504 - send_IPI_mask(cpu_online_map, vector);
1505 + send_IPI_mask(&cpu_online_map, vector);
1506 }
1507
1508 #endif /* __ASM_MACH_IPI_H */
1509 --- a/include/asm-x86/mach-generic/mach_ipi.h
1510 +++ b/include/asm-x86/mach-generic/mach_ipi.h
1511 @@ -4,6 +4,7 @@
1512 #include <asm/genapic.h>
1513
1514 #define send_IPI_mask (genapic->send_IPI_mask)
1515 +#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
1516 #define send_IPI_allbutself (genapic->send_IPI_allbutself)
1517 #define send_IPI_all (genapic->send_IPI_all)
1518
1519 --- a/include/asm-x86/mach-numaq/mach_apic.h
1520 +++ b/include/asm-x86/mach-numaq/mach_apic.h
1521 @@ -7,9 +7,9 @@
1522
1523 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
1524
1525 -static inline cpumask_t target_cpus(void)
1526 +static inline const cpumask_t *target_cpus(void)
1527 {
1528 - return CPU_MASK_ALL;
1529 + return &CPU_MASK_ALL;
1530 }
1531
1532 #define TARGET_CPUS (target_cpus())
1533 @@ -124,7 +124,7 @@ static inline void enable_apic_mode(void
1534 * We use physical apicids here, not logical, so just return the default
1535 * physical broadcast to stop people from breaking us
1536 */
1537 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
1538 +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
1539 {
1540 return (int) 0xF;
1541 }
1542 --- a/include/asm-x86/mach-numaq/mach_ipi.h
1543 +++ b/include/asm-x86/mach-numaq/mach_ipi.h
1544 @@ -1,25 +1,31 @@
1545 #ifndef __ASM_MACH_IPI_H
1546 #define __ASM_MACH_IPI_H
1547
1548 -void send_IPI_mask_sequence(cpumask_t, int vector);
1549 +void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
1550
1551 -static inline void send_IPI_mask(cpumask_t mask, int vector)
1552 +static inline void send_IPI_mask(const cpumask_t *mask, int vector)
1553 {
1554 send_IPI_mask_sequence(mask, vector);
1555 }
1556
1557 -static inline void send_IPI_allbutself(int vector)
1558 +static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
1559 {
1560 - cpumask_t mask = cpu_online_map;
1561 - cpu_clear(smp_processor_id(), mask);
1562 + cpumask_t allbutself = *mask;
1563 + cpu_clear(smp_processor_id(), allbutself);
1564 +
1565 + if (!cpus_empty(allbutself))
1566 + send_IPI_mask_sequence(&allbutself, vector);
1567 +}
1568
1569 - if (!cpus_empty(mask))
1570 - send_IPI_mask(mask, vector);
1571 +static inline void send_IPI_allbutself(int vector)
1572 +{
1573 + send_IPI_mask_allbutself(&cpu_online_map, vector);
1574 }
1575
1576 static inline void send_IPI_all(int vector)
1577 {
1578 - send_IPI_mask(cpu_online_map, vector);
1579 + send_IPI_mask(&cpu_online_map, vector);
1580 }
1581
1582 #endif /* __ASM_MACH_IPI_H */
1583 +
1584 --- a/include/asm-x86/mach-summit/mach_apic.h
1585 +++ b/include/asm-x86/mach-summit/mach_apic.h
1586 @@ -14,13 +14,13 @@
1587
1588 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
1589
1590 -static inline cpumask_t target_cpus(void)
1591 +static inline const cpumask_t *target_cpus(void)
1592 {
1593 /* CPU_MASK_ALL (0xff) has undefined behaviour with
1594 * dest_LowestPrio mode logical clustered apic interrupt routing
1595 * Just start on cpu 0. IRQ balancing will spread load
1596 */
1597 - return cpumask_of_cpu(0);
1598 + return &cpumask_of_cpu(0);
1599 }
1600 #define TARGET_CPUS (target_cpus())
1601
1602 @@ -138,7 +138,7 @@ static inline void enable_apic_mode(void
1603 {
1604 }
1605
1606 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
1607 +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
1608 {
1609 int num_bits_set;
1610 int cpus_found = 0;
1611 --- a/include/asm-x86/mach-summit/mach_ipi.h
1612 +++ b/include/asm-x86/mach-summit/mach_ipi.h
1613 @@ -1,25 +1,31 @@
1614 #ifndef __ASM_MACH_IPI_H
1615 #define __ASM_MACH_IPI_H
1616
1617 -void send_IPI_mask_sequence(cpumask_t mask, int vector);
1618 +void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
1619
1620 -static inline void send_IPI_mask(cpumask_t mask, int vector)
1621 +static inline void send_IPI_mask(const cpumask_t *mask, int vector)
1622 {
1623 send_IPI_mask_sequence(mask, vector);
1624 }
1625
1626 -static inline void send_IPI_allbutself(int vector)
1627 +static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
1628 {
1629 - cpumask_t mask = cpu_online_map;
1630 - cpu_clear(smp_processor_id(), mask);
1631 + cpumask_t allbutself = *mask;
1632 + cpu_clear(smp_processor_id(), allbutself);
1633 +
1634 + if (!cpus_empty(allbutself))
1635 + send_IPI_mask_sequence(&allbutself, vector);
1636 +}
1637
1638 - if (!cpus_empty(mask))
1639 - send_IPI_mask(mask, vector);
1640 +static inline void send_IPI_allbutself(int vector)
1641 +{
1642 + send_IPI_mask_allbutself(&cpu_online_map, vector);
1643 }
1644
1645 static inline void send_IPI_all(int vector)
1646 {
1647 - send_IPI_mask(cpu_online_map, vector);
1648 + send_IPI_mask(&cpu_online_map, vector);
1649 }
1650
1651 #endif /* __ASM_MACH_IPI_H */
1652 +