/*
 *  arch/s390/kernel/smp.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained from STAP) are the sigp
 * functions. For all other functions we use the identity mapping,
 * i.e. cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one, which is what causes all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>

/* prototypes */

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */

struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;

static struct task_struct *current_set[NR_CPUS];

EXPORT_SYMBOL(cpu_online_map);

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus()-1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
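
/*
 * Illustrative usage sketch (not part of the original file; the example_*
 * names are hypothetical). smp_call_function() runs the callback on every
 * other online CPU; with wait=1 it spins until all of them have finished.
 */
#if 0
static void example_poke(void *info)
{
	atomic_t *acks = info;

	/* Runs in interrupt context on each remote CPU: keep it short. */
	atomic_inc(acks);
}

static void example_poke_all(void)
{
	atomic_t acks = ATOMIC_INIT(0);

	/* Must be called with interrupts enabled, not from IRQ context. */
	smp_call_function(example_poke, &acks, 0, 1);
	/* Here acks == num_online_cpus() - 1. */
}
#endif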

/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
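
/*
 * Illustrative usage sketch (not part of the original file; the example_*
 * names are hypothetical). smp_call_function_on() calls the function
 * directly when the target is the current CPU, otherwise it signals the
 * target and, with wait=1, spins until the call has finished there.
 */
#if 0
static void example_get_cpu_id(void *info)
{
	int *id = info;

	*id = smp_processor_id();
}

static int example_id_of(int cpu)
{
	int id = -1;

	/* Returns -EINVAL if the target CPU is not online. */
	smp_call_function_on(example_get_cpu_id, &id, 0, 1, cpu);
	return id;	/* equals cpu when the call succeeded */
}
#endif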

static inline void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static inline void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */

static void do_machine_restart(void * __unused)
{
	int cpu;
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
		signal_processor(smp_processor_id(), sigp_stop);

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}

	/* Store status of other cpus. */
	do_store_status();

	/*
	 * Finally call reipl. Because we waited for all other
	 * cpus to enter this function we know that they do
	 * not hold any s390irq-locks (the cpus have been
	 * interrupted by an external interrupt and s390irq
	 * locks are always held disabled).
	 */
	if (MACHINE_IS_VM)
		cpcmd("IPL", NULL, 0);
	else
		reipl(0x10000 | S390_lowcore.ipl_device);
}

void machine_restart_smp(char * __unused)
{
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}

static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;
	cr[6] = 0;
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}

static void do_machine_halt(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}

static void do_machine_power_off(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_external_call) == sigp_busy)
		udelay(10);
}

/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_external_call) == sigp_busy)
			udelay(10);
	}
}
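
/*
 * Illustrative sketch of the bit-signal protocol above (not part of the
 * original file; the example_* names are hypothetical). A sender sets a
 * bit in the target's lowcore and raises an external call interrupt; the
 * handler in do_ext_call_interrupt() picks all pending bits up at once.
 */
#if 0
/* Sender: publish the request, then kick the target CPU. */
static void example_signal(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function);
}

/*
 * Receiver (this is what do_ext_call_interrupt does): atomically fetch
 * and clear all pending bits, so concurrent senders are never lost, then
 * act on each bit exactly once.
 */
static void example_receive(void)
{
	unsigned long bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
#endif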

#ifndef CONFIG_ARCH_S390X
/*
 * 'purge tlb' callback; smp_ptlb_all() runs it on every CPU to flush
 * all TLBs in the system.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_ARCH_S390X */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct {
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info)
{
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
	preempt_enable();
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
	preempt_enable();
}
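
/*
 * Illustrative usage sketch (not part of the original file; the control
 * register and bit number are arbitrary placeholders, not a documented
 * assignment). smp_ctl_set_bit()/smp_ctl_clear_bit() apply the same
 * or/and masks to the chosen control register on every online CPU and
 * then on the current one.
 */
#if 0
static void example_toggle_ctl(void)
{
	smp_ctl_set_bit(0, 13);		/* set bit 13 of CR0 everywhere */
	smp_ctl_clear_bit(0, 13);	/* and clear it again */
}
#endif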

/*
 * Let's check how many CPUs we have.
 */

void
__init smp_check_cpus(unsigned int max_cpus)
{
	int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[num_cpus] = (__u16) cpu;
		if (signal_processor(num_cpus, sigp_sense) ==
		    sigp_not_operational)
			continue;
		cpu_set(num_cpus, cpu_present_map);
		num_cpus++;
	}

	for (cpu = 1; cpu < max_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
}

/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
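
/*
 * Illustrative usage sketch (not part of the original file; the example_*
 * name is hypothetical). smp_get_cpu() reserves some online CPU from the
 * mask (preferring one that is already reserved) and smp_put_cpu() drops
 * the reservation; a reserved CPU refuses to go offline in __cpu_disable().
 */
#if 0
static int example_with_reserved_cpu(void)
{
	int cpu;

	cpu = smp_get_cpu(cpu_online_map);
	if (cpu < 0)
		return cpu;	/* -ENODEV: no online CPU in the mask */
	/* ... the chosen cpu now cannot be hot-unplugged ... */
	smp_put_cpu(cpu);
	return 0;
}
#endif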

static inline int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		idle->thread_info + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam 0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();
	signal_processor(cpu, sigp_restart);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

int
__cpu_disable(void)
{
	unsigned long flags;
	ec_creg_mask_parms cr_parms;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[smp_processor_id()] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}

#ifdef CONFIG_PFAULT
	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();
#endif

	/* disable all external interrupts */

	cr_parms.start_ctl = 0;
	cr_parms.end_ctl = 0;
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	smp_ctl_bit_callback(&cr_parms);

	/* disable all I/O interrupts */

	cr_parms.start_ctl = 6;
	cr_parms.end_ctl = 6;
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	/* disable most machine checks */

	cr_parms.start_ctl = 14;
	cr_parms.end_ctl = 14;
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1202 external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
	smp_check_cpus(max_cpus);
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL|GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
#ifdef CONFIG_CHECK_STACK
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#endif
	}
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);