// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Procedures for interfacing to the RTAS on CHRP machines.
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 */

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/delay.h>
#include <linux/uaccess.h>
#include <asm/udbg.h>
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>

/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);

struct rtas_t rtas = {
	.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);

DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL(rtas_data_buf_lock);

char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
EXPORT_SYMBOL(rtas_data_buf);

unsigned long rtas_rmo_buf;

/*
 * If non-NULL, this gets called when the kernel terminates.
 * This is done like this so rtas_flash can be a module.
 */
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);

/* RTAS uses home-made raw locking instead of spin_lock_irqsave()
 * because it can be called from really nasty contexts, such as with
 * the timebase stopped, which would lock up with normal locks and
 * spinlock debugging enabled.
 */
static unsigned long lock_rtas(void)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	arch_spin_lock(&rtas.lock);
	return flags;
}

static void unlock_rtas(unsigned long flags)
{
	arch_spin_unlock(&rtas.lock);
	local_irq_restore(flags);
	preempt_enable();
}
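
/*
 * Illustrative usage sketch (not a new API): low-level callers that cannot
 * use rtas_call() pair lock_rtas()/unlock_rtas() around rtas_call_unlocked()
 * so the global rtas.args buffer is not clobbered, e.g.:
 *
 *	unsigned long s = lock_rtas();
 *	rtas_call_unlocked(&rtas.args, token, nargs, nret, NULL, ...);
 *	unlock_rtas(s);
 *
 * This mirrors what call_rtas_display_status() below does.
 */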

/*
 * call_rtas_display_status and call_rtas_display_status_delay
 * are designed only for very early low-level debugging, which
 * is why the token is hard-coded to 10.
 */
static void call_rtas_display_status(unsigned char c)
{
	unsigned long s;

	if (!rtas.base)
		return;

	s = lock_rtas();
	rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
	unlock_rtas(s);
}

static void call_rtas_display_status_delay(char c)
{
	static int pending_newline = 0;  /* did last write end with unprinted newline? */
	static int width = 16;

	if (c == '\n') {
		while (width-- > 0)
			call_rtas_display_status(' ');
		width = 16;
		mdelay(500);
		pending_newline = 1;
	} else {
		if (pending_newline) {
			call_rtas_display_status('\r');
			call_rtas_display_status('\n');
		}
		pending_newline = 0;
		if (width--) {
			call_rtas_display_status(c);
			udelay(10000);
		}
	}
}

void __init udbg_init_rtas_panel(void)
{
	udbg_putc = call_rtas_display_status_delay;
}

#ifdef CONFIG_UDBG_RTAS_CONSOLE

/* If you think you're dying before early_init_dt_scan_rtas() does its
 * work, you can hard code the token values for your firmware here and
 * hardcode rtas.base/entry etc.
 */
static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;

static void udbg_rtascon_putc(char c)
{
	int tries;

	if (!rtas.base)
		return;

	/* Add CRs before LFs */
	if (c == '\n')
		udbg_rtascon_putc('\r');

	/* if there is more than one character to be displayed, wait a bit */
	for (tries = 0; tries < 16; tries++) {
		if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
			break;
		udelay(1000);
	}
}

static int udbg_rtascon_getc_poll(void)
{
	int c;

	if (!rtas.base)
		return -1;

	if (rtas_call(rtas_getchar_token, 0, 2, &c))
		return -1;

	return c;
}

static int udbg_rtascon_getc(void)
{
	int c;

	while ((c = udbg_rtascon_getc_poll()) == -1)
		;

	return c;
}


void __init udbg_init_rtas_console(void)
{
	udbg_putc = udbg_rtascon_putc;
	udbg_getc = udbg_rtascon_getc;
	udbg_getc_poll = udbg_rtascon_getc_poll;
}
#endif /* CONFIG_UDBG_RTAS_CONSOLE */

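/*
 * Note: rtas_progress() below writes to the operator panel via the
 * "display-character" RTAS call (falling back to "set-indicator" to drive
 * the hex display).  Panel geometry comes from optional properties of the
 * /rtas node: "ibm,display-line-length", "ibm,display-number-of-lines",
 * "ibm,display-truncation-length" and "ibm,form-feed".
 */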
void rtas_progress(char *s, unsigned short hex)
{
	struct device_node *root;
	int width;
	const __be32 *p;
	char *os;
	static int display_character, set_indicator;
	static int display_width, display_lines, form_feed;
	static const int *row_width;
	static DEFINE_SPINLOCK(progress_lock);
	static int current_line;
	static int pending_newline = 0; /* did last write end with unprinted newline? */

	if (!rtas.base)
		return;

	if (display_width == 0) {
		display_width = 0x10;
		if ((root = of_find_node_by_path("/rtas"))) {
			if ((p = of_get_property(root,
					"ibm,display-line-length", NULL)))
				display_width = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,form-feed", NULL)))
				form_feed = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,display-number-of-lines", NULL)))
				display_lines = be32_to_cpu(*p);
			row_width = of_get_property(root,
					"ibm,display-truncation-length", NULL);
			of_node_put(root);
		}
		display_character = rtas_token("display-character");
		set_indicator = rtas_token("set-indicator");
	}

	if (display_character == RTAS_UNKNOWN_SERVICE) {
		/* use hex display if available */
		if (set_indicator != RTAS_UNKNOWN_SERVICE)
			rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
		return;
	}

	spin_lock(&progress_lock);

	/*
	 * Last write ended with newline, but we didn't print it since
	 * it would just clear the bottom line of output. Print it now
	 * instead.
	 *
	 * If no newline is pending and form feed is supported, clear the
	 * display with a form feed; otherwise, print a CR to start output
	 * at the beginning of the line.
	 */
	if (pending_newline) {
		rtas_call(display_character, 1, 1, NULL, '\r');
		rtas_call(display_character, 1, 1, NULL, '\n');
		pending_newline = 0;
	} else {
		current_line = 0;
		if (form_feed)
			rtas_call(display_character, 1, 1, NULL,
				  (char)form_feed);
		else
			rtas_call(display_character, 1, 1, NULL, '\r');
	}

	if (row_width)
		width = row_width[current_line];
	else
		width = display_width;
	os = s;
	while (*os) {
		if (*os == '\n' || *os == '\r') {
			/* If newline is the last character, save it
			 * until next call to avoid bumping up the
			 * display output.
			 */
			if (*os == '\n' && !os[1]) {
				pending_newline = 1;
				current_line++;
				if (current_line > display_lines-1)
					current_line = display_lines-1;
				spin_unlock(&progress_lock);
				return;
			}

			/* RTAS wants CR-LF, not just LF */

			if (*os == '\n') {
				rtas_call(display_character, 1, 1, NULL, '\r');
				rtas_call(display_character, 1, 1, NULL, '\n');
			} else {
				/* CR might be used to re-draw a line, so we'll
				 * leave it alone and not add LF.
				 */
				rtas_call(display_character, 1, 1, NULL, *os);
			}

			if (row_width)
				width = row_width[current_line];
			else
				width = display_width;
		} else {
			width--;
			rtas_call(display_character, 1, 1, NULL, *os);
		}

		os++;

		/* if we have exceeded the display width, skip the rest of the line */
		if (width <= 0)
			while ((*os != 0) && (*os != '\n') && (*os != '\r'))
				os++;
	}

	spin_unlock(&progress_lock);
}
EXPORT_SYMBOL(rtas_progress);		/* needed by rtas_flash module */

int rtas_token(const char *service)
{
	const __be32 *tokp;
	if (rtas.dev == NULL)
		return RTAS_UNKNOWN_SERVICE;
	tokp = of_get_property(rtas.dev, service, NULL);
	return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_token);

int rtas_service_present(const char *service)
{
	return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_service_present);

#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
 * Return the firmware-specified size of the error log buffer
 * for all rtas calls that require an error buffer argument.
 * This includes 'check-exception' and 'rtas-last-error'.
 */
int rtas_get_error_log_max(void)
{
	static int rtas_error_log_max;
	if (rtas_error_log_max)
		return rtas_error_log_max;

	rtas_error_log_max = rtas_token("rtas-error-log-max");
	if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
	    (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
		printk(KERN_WARNING "RTAS: bad log buffer size %d\n",
		       rtas_error_log_max);
		rtas_error_log_max = RTAS_ERROR_LOG_MAX;
	}
	return rtas_error_log_max;
}
EXPORT_SYMBOL(rtas_get_error_log_max);


static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
static int rtas_last_error_token;

/** Return a copy of the detailed error text associated with the
 *  most recent failed call to rtas.  Because the error text
 *  might go stale if there are any other intervening rtas calls,
 *  this routine must be called atomically with whatever produced
 *  the error (i.e. with rtas.lock still held from the previous call).
 */
static char *__fetch_rtas_last_error(char *altbuf)
{
	struct rtas_args err_args, save_args;
	u32 bufsz;
	char *buf = NULL;

	if (rtas_last_error_token == -1)
		return NULL;

	bufsz = rtas_get_error_log_max();

	err_args.token = cpu_to_be32(rtas_last_error_token);
	err_args.nargs = cpu_to_be32(2);
	err_args.nret = cpu_to_be32(1);
	err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
	err_args.args[1] = cpu_to_be32(bufsz);
	err_args.args[2] = 0;

	save_args = rtas.args;
	rtas.args = err_args;

	enter_rtas(__pa(&rtas.args));

	err_args = rtas.args;
	rtas.args = save_args;

	/* Log the error in the unlikely case that there was one. */
	if (unlikely(err_args.args[2] == 0)) {
		if (altbuf) {
			buf = altbuf;
		} else {
			buf = rtas_err_buf;
			if (slab_is_available())
				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
		}
		if (buf)
			memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
	}

	return buf;
}

#define get_errorlog_buffer()	kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)

#else /* CONFIG_RTAS_ERROR_LOGGING */
#define __fetch_rtas_last_error(x)	NULL
#define get_errorlog_buffer()		NULL
#endif


static void
va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
		      va_list list)
{
	int i;

	args->token = cpu_to_be32(token);
	args->nargs = cpu_to_be32(nargs);
	args->nret = cpu_to_be32(nret);
	args->rets = &(args->args[nargs]);

	for (i = 0; i < nargs; ++i)
		args->args[i] = cpu_to_be32(va_arg(list, __u32));

	for (i = 0; i < nret; ++i)
		args->rets[i] = 0;

	enter_rtas(__pa(args));
}

void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
{
	va_list list;

	va_start(list, nret);
	va_rtas_call_unlocked(args, token, nargs, nret, list);
	va_end(list);
}

int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
	va_list list;
	int i;
	unsigned long s;
	struct rtas_args *rtas_args;
	char *buff_copy = NULL;
	int ret;

	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
		return -1;

	s = lock_rtas();

	/* We use the global rtas args buffer */
	rtas_args = &rtas.args;

	va_start(list, outputs);
	va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
	va_end(list);

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (be32_to_cpu(rtas_args->rets[0]) == -1)
		buff_copy = __fetch_rtas_last_error(NULL);

	if (nret > 1 && outputs != NULL)
		for (i = 0; i < nret-1; ++i)
			outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
	ret = (nret > 0)? be32_to_cpu(rtas_args->rets[0]): 0;

	unlock_rtas(s);

	if (buff_copy) {
		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
		if (slab_is_available())
			kfree(buff_copy);
	}
	return ret;
}
EXPORT_SYMBOL(rtas_call);
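
/*
 * Usage sketch (illustrative, mirroring rtas_get_sensor() further down):
 * rets[0] is returned as the status and, when nret > 1, the remaining
 * return words are copied into the caller's outputs[] array:
 *
 *	int state;
 *	int rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &state,
 *			   sensor, index);
 */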

/* For RTAS_BUSY (-2), delay for 1 millisecond.  For an extended busy status
 * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
 */
unsigned int rtas_busy_delay_time(int status)
{
	int order;
	unsigned int ms = 0;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
		   status <= RTAS_EXTENDED_DELAY_MAX) {
		order = status - RTAS_EXTENDED_DELAY_MIN;
		for (ms = 1; order > 0; order--)
			ms *= 10;
	}

	return ms;
}
EXPORT_SYMBOL(rtas_busy_delay_time);
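
/*
 * Worked example of the 990n rule implemented above: RTAS_BUSY (-2) maps to
 * 1 ms, status 9900 to 1 ms, 9901 to 10 ms, 9903 to 1000 ms and 9905 to
 * 100000 ms (10^n ms, where n is the last digit of the status).
 */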

/* For an RTAS busy status code, perform the hinted delay. */
unsigned int rtas_busy_delay(int status)
{
	unsigned int ms;

	might_sleep();
	ms = rtas_busy_delay_time(status);
	if (ms && need_resched())
		msleep(ms);

	return ms;
}
EXPORT_SYMBOL(rtas_busy_delay);

static int rtas_error_rc(int rtas_rc)
{
	int rc;

	switch (rtas_rc) {
	case -1:		/* Hardware Error */
		rc = -EIO;
		break;
	case -3:		/* Bad indicator/domain/etc */
		rc = -EINVAL;
		break;
	case -9000:		/* Isolation error */
		rc = -EFAULT;
		break;
	case -9001:		/* Outstanding TCE/PTE */
		rc = -EEXIST;
		break;
	case -9002:		/* No usable slot */
		rc = -ENODEV;
		break;
	default:
		printk(KERN_ERR "%s: unexpected RTAS error %d\n",
		       __func__, rtas_rc);
		rc = -ERANGE;
		break;
	}
	return rc;
}

int rtas_get_power_level(int powerdomain, int *level)
{
	int token = rtas_token("get-power-level");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
		udelay(1);

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_get_power_level);

int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
	int token = rtas_token("set-power-level");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_set_power_level);

int rtas_get_sensor(int sensor, int index, int *state)
{
	int token = rtas_token("get-sensor-state");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, state, sensor, index);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_get_sensor);

int rtas_get_sensor_fast(int sensor, int index, int *state)
{
	int token = rtas_token("get-sensor-state");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 2, 2, state, sensor, index);
	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}

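/*
 * The "rtas-indicators" property parsed below is a flat array of
 * (token, max-index) cell pairs, one pair per indicator supported by the
 * firmware; rtas_indicator_present() scans it for a matching token.
 */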
bool rtas_indicator_present(int token, int *maxindex)
{
	int proplen, count, i;
	const struct indicator_elem {
		__be32 token;
		__be32 maxindex;
	} *indicators;

	indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
	if (!indicators)
		return false;

	count = proplen / sizeof(struct indicator_elem);

	for (i = 0; i < count; i++) {
		if (__be32_to_cpu(indicators[i].token) != token)
			continue;
		if (maxindex)
			*maxindex = __be32_to_cpu(indicators[i].maxindex);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(rtas_indicator_present);

int rtas_set_indicator(int indicator, int index, int new_value)
{
	int token = rtas_token("set-indicator");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_set_indicator);

/*
 * Like rtas_set_indicator(), but never retries: RTAS_BUSY and extended
 * delay status codes are not honoured (and are warned about below).
 */
int rtas_set_indicator_fast(int indicator, int index, int new_value)
{
	int rc;
	int token = rtas_token("set-indicator");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);

	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);

	return rc;
}

void __noreturn rtas_restart(char *cmd)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_RESTART);
	printk("RTAS system-reboot returned %d\n",
	       rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
	for (;;);
}

void rtas_power_off(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

void __noreturn rtas_halt(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_HALT);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];

void rtas_os_term(char *str)
{
	int status;

	/*
	 * Firmware with the ibm,extended-os-term property is guaranteed
	 * to always return from an ibm,os-term call. Earlier versions without
	 * this property may terminate the partition which we want to avoid
	 * since it interferes with panic_timeout.
	 */
	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
		return;

	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);

	do {
		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
				   __pa(rtas_os_term_buf));
	} while (rtas_busy_delay(status));

	if (status != 0)
		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}

static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
#ifdef CONFIG_PPC_PSERIES
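/*
 * Overview of the partition-suspend dance implemented below (a descriptive
 * summary of this code, not of the firmware spec): every online CPU is sent
 * into __rtas_suspend_cpu(), where it parks in the H_JOIN hcall with
 * external interrupts disabled.  One CPU gets H_CONTINUE back and becomes
 * the "last" CPU; it issues the ibm,suspend-me RTAS call in
 * __rtas_suspend_last_cpu() and afterwards prods the joined CPUs back out
 * with H_PROD.  Errors are propagated through rtas_suspend_me_data.error.
 */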
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
	       !atomic_read(&data->error))
		rc = rtas_call(data->token, 0, 1, NULL);

	if (rc || atomic_read(&data->error)) {
		printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
		slb_set_size(slb_size);
	}

	if (atomic_read(&data->error))
		rc = atomic_read(&data->error);

	atomic_set(&data->error, rc);
	pSeries_coalesce_init();

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}

int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
{
	atomic_inc(&data->working);
	return __rtas_suspend_last_cpu(data, 0);
}

static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	long rc = H_SUCCESS;
	unsigned long msr_save;
	int cpu;

	atomic_inc(&data->working);

	/* really need to ensure MSR.EE is off for H_JOIN */
	msr_save = mfmsr();
	mtmsr(msr_save & ~(MSR_EE));

	while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
		rc = plpar_hcall_norets(H_JOIN);

	mtmsr(msr_save);

	if (rc == H_SUCCESS) {
		/* This cpu was prodded and the suspend is complete. */
		goto out;
	} else if (rc == H_CONTINUE) {
		/* All other cpus are in H_JOIN, this cpu does
		 * the suspend.
		 */
		return __rtas_suspend_last_cpu(data, wake_when_done);
	} else {
		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
		       smp_processor_id(), rc);
		atomic_set(&data->error, rc);
	}

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		/* This cpu did the suspend or got an error; in either case,
		 * we need to prod all other cpus out of join state.
		 * Extra prods are harmless.
		 */
		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}
out:
	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);
	return rc;
}

int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
{
	return __rtas_suspend_cpu(data, 0);
}

static void rtas_percpu_suspend_me(void *info)
{
	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}

enum rtas_cpu_state {
	DOWN,
	UP,
};

#ifndef CONFIG_SMP
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	if (!cpumask_empty(cpus)) {
		cpumask_clear(cpus);
		return -EINVAL;
	} else
		return 0;
}
#else
/* On return cpumask will be altered to indicate CPUs changed.
 * CPUs with states changed will be set in the mask,
 * CPUs with status unchanged will be unset in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	int cpu;
	int cpuret = 0;
	int ret = 0;

	if (cpumask_empty(cpus))
		return 0;

	for_each_cpu(cpu, cpus) {
		switch (state) {
		case DOWN:
			cpuret = cpu_down(cpu);
			break;
		case UP:
			cpuret = cpu_up(cpu);
			break;
		}
		if (cpuret) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
				 __func__,
				 ((state == UP) ? "up" : "down"),
				 cpu, cpuret);
			if (!ret)
				ret = cpuret;
			if (state == UP) {
				/* clear bits for unchanged cpus, return */
				cpumask_shift_right(cpus, cpus, cpu);
				cpumask_shift_left(cpus, cpus, cpu);
				break;
			} else {
				/* clear bit for unchanged cpu, continue */
				cpumask_clear_cpu(cpu, cpus);
			}
		}
	}

	return ret;
}
#endif

int rtas_online_cpus_mask(cpumask_var_t cpus)
{
	int ret;

	ret = rtas_cpu_state_change_mask(UP, cpus);

	if (ret) {
		cpumask_var_t tmp_mask;

		if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
			return ret;

		/* Use tmp_mask to preserve cpus mask from first failure */
		cpumask_copy(tmp_mask, cpus);
		rtas_offline_cpus_mask(tmp_mask);
		free_cpumask_var(tmp_mask);
	}

	return ret;
}
EXPORT_SYMBOL(rtas_online_cpus_mask);

int rtas_offline_cpus_mask(cpumask_var_t cpus)
{
	return rtas_cpu_state_change_mask(DOWN, cpus);
}
EXPORT_SYMBOL(rtas_offline_cpus_mask);

int rtas_ibm_suspend_me(u64 handle)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
	cpumask_var_t offline_mask;
	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n", rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		return -EAGAIN;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		return -EIO;
	}

	if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
		return -ENOMEM;

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		pr_err("%s: Could not bring present CPUs online.\n", __func__);
		atomic_set(&data.error, cpuret);
		goto out;
	}

	cpu_hotplug_disable();

	/* Check if we raced with a CPU-Offline Operation */
	if (unlikely(!cpumask_equal(cpu_present_mask, cpu_online_mask))) {
		pr_err("%s: Raced against a concurrent CPU-Offline\n",
		       __func__);
		atomic_set(&data.error, -EBUSY);
		goto out_hotplug_enable;
	}

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	on_each_cpu(rtas_percpu_suspend_me, &data, 0);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

out_hotplug_enable:
	cpu_hotplug_enable();

	/* Take down CPUs not online prior to suspend */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("%s: Could not restore CPUs to offline state.\n",
			__func__);

out:
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
int rtas_ibm_suspend_me(u64 handle)
{
	return -ENOSYS;
}
#endif

/**
 * Find a specific pseries error log in an RTAS extended event log.
 * @log: RTAS error/event log
 * @section_id: two character section identifier
 *
 * Returns a pointer to the specified errorlog or NULL if not found.
 */
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
					      uint16_t section_id)
{
	struct rtas_ext_event_log_v6 *ext_log =
		(struct rtas_ext_event_log_v6 *)log->buffer;
	struct pseries_errorlog *sect;
	unsigned char *p, *log_end;
	uint32_t ext_log_length = rtas_error_extended_log_length(log);
	uint8_t log_format = rtas_ext_event_log_format(ext_log);
	uint32_t company_id = rtas_ext_event_company_id(ext_log);

	/* Check that we understand the format */
	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;

	log_end = log->buffer + ext_log_length;
	p = ext_log->vendor_log;

	while (p < log_end) {
		sect = (struct pseries_errorlog *)p;
		if (pseries_errorlog_id(sect) == section_id)
			return sect;
		p += pseries_errorlog_length(sect);
	}

	return NULL;
}
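
/*
 * Illustrative call, assuming a two-character section id macro such as
 * PSERIES_ELOG_SECT_ID_HOTPLUG from asm/rtas.h:
 *
 *	struct pseries_errorlog *sect =
 *		get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_HOTPLUG);
 */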

/* We assume the arguments are passed in big endian */
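/*
 * Layout expected from user space (matching the copy_from_user() calls
 * below): struct rtas_args starts with the big-endian token, nargs and nret
 * words, followed by nargs argument words; nret return words are copied
 * back out starting at args[nargs].
 */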
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
	struct rtas_args args;
	unsigned long flags;
	char *buff_copy, *errbuf = NULL;
	int nargs, nret, token;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!rtas.entry)
		return -EINVAL;

	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	nargs = be32_to_cpu(args.nargs);
	nret = be32_to_cpu(args.nret);
	token = be32_to_cpu(args.token);

	if (nargs >= ARRAY_SIZE(args.args)
	    || nret > ARRAY_SIZE(args.args)
	    || nargs + nret > ARRAY_SIZE(args.args))
		return -EINVAL;

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	args.rets = &args.args[nargs];
	memset(args.rets, 0, nret * sizeof(rtas_arg_t));

	/* Need to handle the ibm,suspend-me call specially */
	if (token == ibm_suspend_me_token) {

		/*
		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
		 * endian, or at least the hcall within it requires it.
		 */
		int rc = 0;
		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
		              | be32_to_cpu(args.args[1]);
		rc = rtas_ibm_suspend_me(handle);
		if (rc == -EAGAIN)
			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
		else if (rc == -EIO)
			args.rets[0] = cpu_to_be32(-1);
		else if (rc)
			return rc;
		goto copy_return;
	}

	buff_copy = get_errorlog_buffer();

	flags = lock_rtas();

	rtas.args = args;
	enter_rtas(__pa(&rtas.args));
	args = rtas.args;

	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
	if (be32_to_cpu(args.rets[0]) == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

	unlock_rtas(flags);

	if (buff_copy) {
		if (errbuf)
			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
		kfree(buff_copy);
	}

copy_return:
	/* Copy out args. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
			 nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}

/*
 * Call early during boot, before mem init, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
	u32 base, size, entry;
	int no_base, no_size, no_entry;

	/* Get RTAS dev node and fill up our "rtas" structure with info
	 * about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (!rtas.dev)
		return;

	no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
	no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
	if (no_base || no_size) {
		of_node_put(rtas.dev);
		rtas.dev = NULL;
		return;
	}

	rtas.base = base;
	rtas.size = size;
	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
	rtas.entry = no_entry ? rtas.base : entry;

	/* If RTAS was found, allocate the RMO buffer for it and look for
	 * the ibm,suspend-me token if any
	 */
#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
	}
#endif
	rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE,
						 0, rtas_region);
	if (!rtas_rmo_buf)
		panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
		      PAGE_SIZE, &rtas_region);

#ifdef CONFIG_RTAS_ERROR_LOGGING
	rtas_last_error_token = rtas_token("rtas-last-error");
#endif
}

int __init early_init_dt_scan_rtas(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const u32 *basep, *entryp, *sizep;

	if (depth != 1 || strcmp(uname, "rtas") != 0)
		return 0;

	basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
	sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);

	if (basep && entryp && sizep) {
		rtas.base = *basep;
		rtas.entry = *entryp;
		rtas.size = *sizep;
	}

#ifdef CONFIG_UDBG_RTAS_CONSOLE
	basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
	if (basep)
		rtas_putchar_token = *basep;

	basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
	if (basep)
		rtas_getchar_token = *basep;

	if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
	    rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
		udbg_init_rtas_console();

#endif

	/* break now */
	return 1;
}

static arch_spinlock_t timebase_lock;
static u64 timebase = 0;

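/*
 * Timebase hand-off between the boot CPU and a secondary, as implemented
 * below: the giver freezes the timebase via RTAS, publishes its value in
 * the shared 'timebase' variable and spins until the taker has copied it
 * into its own TB registers and cleared the variable, after which the
 * timebase is thawed.
 */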
void rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	arch_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	arch_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}

void rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	arch_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	arch_spin_unlock(&timebase_lock);
}