/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

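/*
 * Platform code hands us its SMP operations here; registering a second
 * set of ops simply overrides the first (with a warning).
 */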
void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

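/*
 * Runs on the boot CPU early in SMP bring-up: set up the boot CPU's MMU
 * context and hand off to the platform's prepare_cpus() hook.
 */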
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

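/*
 * CPU hotplug: native_cpu_die() runs on a surviving CPU and polls
 * cpu_state for up to a second, waiting for the dying CPU to mark
 * itself CPU_DEAD.
 */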
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__get_cpu_var(cpu_state) = CPU_DEAD;
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

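/*
 * Entry point for a secondary CPU once head.S has set up its stack:
 * enable the MMU, adopt init_mm, initialize per-CPU traps and timers,
 * mark the CPU online and drop into the idle loop.
 */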
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
}

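/*
 * Bring-up parameters consumed by head.S: the secondary's initial stack
 * pointer, its thread_info and the kernel function it should jump to.
 */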
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

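/*
 * Boot a single secondary CPU: reuse (or fork) its idle task, describe it
 * to head.S via stack_start, kick it with the platform's start_cpu() op
 * and then wait up to a second for it to come online.
 */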
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = cpu_data[cpu].idle;
	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk)) {
			pr_err("Failed forking idle task for cpu %d\n", cpu);
			return PTR_ERR(tsk);
		}

		cpu_data[cpu].idle = tsk;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

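/* Report the aggregate BogoMIPS of all CPUs that made it online. */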
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

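/*
 * The cross-CPU notifications below are delivered through the platform's
 * send_ipi() op and dispatched on the receiving side by smp_message_recv().
 */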
void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

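/* Dispatch an incoming IPI message to the matching generic handler. */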
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

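/*
 * SMP TLB flushing: the flush_tlb_*() entry points below broadcast the
 * corresponding local_flush_tlb_*() primitive to other CPUs (via
 * smp_call_function()/on_each_cpu()) where an IPI is actually needed.
 */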
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}