arch/powerpc/kernel/prom_init.c
1 /*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #undef DEBUG_PROM
17
18 #include <stdarg.h>
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/init.h>
22 #include <linux/threads.h>
23 #include <linux/spinlock.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/proc_fs.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <asm/prom.h>
32 #include <asm/rtas.h>
33 #include <asm/page.h>
34 #include <asm/processor.h>
35 #include <asm/irq.h>
36 #include <asm/io.h>
37 #include <asm/smp.h>
38 #include <asm/mmu.h>
39 #include <asm/pgtable.h>
40 #include <asm/iommu.h>
41 #include <asm/btext.h>
42 #include <asm/sections.h>
43 #include <asm/machdep.h>
44 #include <asm/opal.h>
45 #include <asm/asm-prototypes.h>
46
47 #include <linux/linux_logo.h>
48
49 /*
50 * Eventually bump that one up
51 */
52 #define DEVTREE_CHUNK_SIZE 0x100000
53
54 /*
55 * This is the size of the local memory reserve map that gets copied
56 * into the boot params passed to the kernel. That size is totally
57 * flexible as the kernel just reads the list until it encounters an
58 * entry with size 0, so it can be changed without breaking binary
59 * compatibility
60 */
61 #define MEM_RESERVE_MAP_SIZE 8
62
63 /*
64 * prom_init() is called very early on, before the kernel text
65 * and data have been mapped to KERNELBASE. At this point the code
66 * is running at whatever address it has been loaded at.
67 * On ppc32 we compile with -mrelocatable, which means that references
68 * to extern and static variables get relocated automatically.
 69  * ppc64 objects are always relocatable; we just need to relocate the
70 * TOC.
71 *
72 * Because OF may have mapped I/O devices into the area starting at
73 * KERNELBASE, particularly on CHRP machines, we can't safely call
74 * OF once the kernel has been mapped to KERNELBASE. Therefore all
75 * OF calls must be done within prom_init().
76 *
77 * ADDR is used in calls to call_prom. The 4th and following
78 * arguments to call_prom should be 32-bit values.
79 * On ppc64, 64 bit values are truncated to 32 bits (and
80 * fortunately don't get interpreted as two arguments).
81 */
82 #define ADDR(x) (u32)(unsigned long)(x)
83
84 #ifdef CONFIG_PPC64
85 #define OF_WORKAROUNDS 0
86 #else
87 #define OF_WORKAROUNDS of_workarounds
88 int of_workarounds;
89 #endif
90
91 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
92 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
93
94 #define PROM_BUG() do { \
95 prom_printf("kernel BUG at %s line 0x%x!\n", \
96 __FILE__, __LINE__); \
97 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
98 } while (0)
99
100 #ifdef DEBUG_PROM
101 #define prom_debug(x...) prom_printf(x)
102 #else
103 #define prom_debug(x...)
104 #endif
105
106
107 typedef u32 prom_arg_t;
108
109 struct prom_args {
110 __be32 service;
111 __be32 nargs;
112 __be32 nret;
113 __be32 args[10];
114 };
115
116 struct prom_t {
117 ihandle root;
118 phandle chosen;
119 int cpu;
120 ihandle stdout;
121 ihandle mmumap;
122 ihandle memory;
123 };
124
125 struct mem_map_entry {
126 __be64 base;
127 __be64 size;
128 };
129
130 typedef __be32 cell_t;
131
132 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
133 unsigned long r6, unsigned long r7, unsigned long r8,
134 unsigned long r9);
135
136 #ifdef CONFIG_PPC64
137 extern int enter_prom(struct prom_args *args, unsigned long entry);
138 #else
139 static inline int enter_prom(struct prom_args *args, unsigned long entry)
140 {
141 return ((int (*)(struct prom_args *))entry)(args);
142 }
143 #endif
144
145 extern void copy_and_flush(unsigned long dest, unsigned long src,
146 unsigned long size, unsigned long offset);
147
148 /* prom structure */
149 static struct prom_t __initdata prom;
150
151 static unsigned long prom_entry __initdata;
152
153 #define PROM_SCRATCH_SIZE 256
154
155 static char __initdata of_stdout_device[256];
156 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
157
158 static unsigned long __initdata dt_header_start;
159 static unsigned long __initdata dt_struct_start, dt_struct_end;
160 static unsigned long __initdata dt_string_start, dt_string_end;
161
162 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
163
164 #ifdef CONFIG_PPC64
165 static int __initdata prom_iommu_force_on;
166 static int __initdata prom_iommu_off;
167 static unsigned long __initdata prom_tce_alloc_start;
168 static unsigned long __initdata prom_tce_alloc_end;
169 #endif
170
171 static bool __initdata prom_radix_disable;
172
173 struct platform_support {
174 bool hash_mmu;
175 bool radix_mmu;
176 bool radix_gtse;
177 };
178
179 /* Platform codes are now obsolete in the kernel; they are only used within
180  * this file and will ultimately go away too. Feel free to change them if you
181  * need to, they are not shared with anything outside of this file anymore.
182  */
183 #define PLATFORM_PSERIES 0x0100
184 #define PLATFORM_PSERIES_LPAR 0x0101
185 #define PLATFORM_LPAR 0x0001
186 #define PLATFORM_POWERMAC 0x0400
187 #define PLATFORM_GENERIC 0x0500
188 #define PLATFORM_OPAL 0x0600
189
190 static int __initdata of_platform;
191
192 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
193
194 static unsigned long __initdata prom_memory_limit;
195
196 static unsigned long __initdata alloc_top;
197 static unsigned long __initdata alloc_top_high;
198 static unsigned long __initdata alloc_bottom;
199 static unsigned long __initdata rmo_top;
200 static unsigned long __initdata ram_top;
201
202 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
203 static int __initdata mem_reserve_cnt;
204
205 static cell_t __initdata regbuf[1024];
206
207 static bool rtas_has_query_cpu_stopped;
208
209
210 /*
211 * Error results ... some OF calls will return "-1" on error, some
212 * will return 0, some will return either. To simplify, here are
213 * macros to use with any ihandle or phandle return value to check if
214 * it is valid
215 */
216
217 #define PROM_ERROR (-1u)
218 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
219 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
220
221
222 /* This is the one and *ONLY* place where we actually call open
223 * firmware.
224 */
225
226 static int __init call_prom(const char *service, int nargs, int nret, ...)
227 {
228 int i;
229 struct prom_args args;
230 va_list list;
231
232 args.service = cpu_to_be32(ADDR(service));
233 args.nargs = cpu_to_be32(nargs);
234 args.nret = cpu_to_be32(nret);
235
236 va_start(list, nret);
237 for (i = 0; i < nargs; i++)
238 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
239 va_end(list);
240
241 for (i = 0; i < nret; i++)
242 args.args[nargs+i] = 0;
243
244 if (enter_prom(&args, prom_entry) < 0)
245 return PROM_ERROR;
246
247 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
248 }
249
250 static int __init call_prom_ret(const char *service, int nargs, int nret,
251 prom_arg_t *rets, ...)
252 {
253 int i;
254 struct prom_args args;
255 va_list list;
256
257 args.service = cpu_to_be32(ADDR(service));
258 args.nargs = cpu_to_be32(nargs);
259 args.nret = cpu_to_be32(nret);
260
261 va_start(list, rets);
262 for (i = 0; i < nargs; i++)
263 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
264 va_end(list);
265
266 for (i = 0; i < nret; i++)
267 args.args[nargs+i] = 0;
268
269 if (enter_prom(&args, prom_entry) < 0)
270 return PROM_ERROR;
271
272 if (rets != NULL)
273 for (i = 1; i < nret; ++i)
274 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
275
276 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
277 }
278
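/*
 * Editor's note (not in the original source): a typical use of call_prom()
 * mirrors the IEEE 1275 client interface, naming the service and the number
 * of input and output cells. For example, the prom_getprop() wrapper further
 * down amounts to (node and buf being a caller's phandle and buffer, used
 * here purely for illustration):
 *
 *	int len = call_prom("getprop", 4, 1, node, ADDR("device_type"),
 *			    (u32)(unsigned long)buf, (u32)sizeof(buf));
 *
 * A PROM_ERROR result means the call failed or the property does not exist.
 */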
279
280 static void __init prom_print(const char *msg)
281 {
282 const char *p, *q;
283
284 if (prom.stdout == 0)
285 return;
286
287 for (p = msg; *p != 0; p = q) {
288 for (q = p; *q != 0 && *q != '\n'; ++q)
289 ;
290 if (q > p)
291 call_prom("write", 3, 1, prom.stdout, p, q - p);
292 if (*q == 0)
293 break;
294 ++q;
295 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
296 }
297 }
298
299
300 static void __init prom_print_hex(unsigned long val)
301 {
302 int i, nibbles = sizeof(val)*2;
303 char buf[sizeof(val)*2+1];
304
305 for (i = nibbles-1; i >= 0; i--) {
306 buf[i] = (val & 0xf) + '0';
307 if (buf[i] > '9')
308 buf[i] += ('a'-'0'-10);
309 val >>= 4;
310 }
311 buf[nibbles] = '\0';
312 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
313 }
314
315 /* max number of decimal digits in an unsigned long */
316 #define UL_DIGITS 21
317 static void __init prom_print_dec(unsigned long val)
318 {
319 int i, size;
320 char buf[UL_DIGITS+1];
321
322 for (i = UL_DIGITS-1; i >= 0; i--) {
323 buf[i] = (val % 10) + '0';
324 val = val/10;
325 if (val == 0)
326 break;
327 }
328 /* shift stuff down */
329 size = UL_DIGITS - i;
330 call_prom("write", 3, 1, prom.stdout, buf+i, size);
331 }
332
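/*
 * Editor's note: prom_printf() below is a deliberately minimal printf; only
 * %s, %x, %d, %lx, %lu and %ld are recognized (see its switch statement).
 * A representative call, as used throughout this file:
 *
 *	prom_printf("instantiating rtas at 0x%x...", base);
 */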
333 static void __init prom_printf(const char *format, ...)
334 {
335 const char *p, *q, *s;
336 va_list args;
337 unsigned long v;
338 long vs;
339
340 va_start(args, format);
341 for (p = format; *p != 0; p = q) {
342 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
343 ;
344 if (q > p)
345 call_prom("write", 3, 1, prom.stdout, p, q - p);
346 if (*q == 0)
347 break;
348 if (*q == '\n') {
349 ++q;
350 call_prom("write", 3, 1, prom.stdout,
351 ADDR("\r\n"), 2);
352 continue;
353 }
354 ++q;
355 if (*q == 0)
356 break;
357 switch (*q) {
358 case 's':
359 ++q;
360 s = va_arg(args, const char *);
361 prom_print(s);
362 break;
363 case 'x':
364 ++q;
365 v = va_arg(args, unsigned long);
366 prom_print_hex(v);
367 break;
368 case 'd':
369 ++q;
370 vs = va_arg(args, int);
371 if (vs < 0) {
372 prom_print("-");
373 vs = -vs;
374 }
375 prom_print_dec(vs);
376 break;
377 case 'l':
378 ++q;
379 if (*q == 0)
380 break;
381 else if (*q == 'x') {
382 ++q;
383 v = va_arg(args, unsigned long);
384 prom_print_hex(v);
385 } else if (*q == 'u') { /* '%lu' */
386 ++q;
387 v = va_arg(args, unsigned long);
388 prom_print_dec(v);
389 } else if (*q == 'd') { /* %ld */
390 ++q;
391 vs = va_arg(args, long);
392 if (vs < 0) {
393 prom_print("-");
394 vs = -vs;
395 }
396 prom_print_dec(vs);
397 }
398 break;
399 }
400 }
401 va_end(args);
402 }
403
404
405 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
406 unsigned long align)
407 {
408
409 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
410 /*
411 * Old OF requires we claim physical and virtual separately
412 * and then map explicitly (assuming virtual mode)
413 */
414 int ret;
415 prom_arg_t result;
416
417 ret = call_prom_ret("call-method", 5, 2, &result,
418 ADDR("claim"), prom.memory,
419 align, size, virt);
420 if (ret != 0 || result == -1)
421 return -1;
422 ret = call_prom_ret("call-method", 5, 2, &result,
423 ADDR("claim"), prom.mmumap,
424 align, size, virt);
425 if (ret != 0) {
426 call_prom("call-method", 4, 1, ADDR("release"),
427 prom.memory, size, virt);
428 return -1;
429 }
430 /* the 0x12 is M (coherence) + PP == read/write */
431 call_prom("call-method", 6, 1,
432 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
433 return virt;
434 }
435 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
436 (prom_arg_t)align);
437 }
438
439 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
440 {
441 prom_print(reason);
442 /* Do not call exit because it clears the screen on pmac;
443  * it also causes some sort of double-fault on early pmacs */
444 if (of_platform == PLATFORM_POWERMAC)
445 asm("trap\n");
446
447 /* ToDo: should put up an SRC here on pSeries */
448 call_prom("exit", 0, 0);
449
450 for (;;) /* should never get here */
451 ;
452 }
453
454
455 static int __init prom_next_node(phandle *nodep)
456 {
457 phandle node;
458
459 if ((node = *nodep) != 0
460 && (*nodep = call_prom("child", 1, 1, node)) != 0)
461 return 1;
462 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
463 return 1;
464 for (;;) {
465 if ((node = call_prom("parent", 1, 1, node)) == 0)
466 return 0;
467 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
468 return 1;
469 }
470 }
471
472 static inline int prom_getprop(phandle node, const char *pname,
473 void *value, size_t valuelen)
474 {
475 return call_prom("getprop", 4, 1, node, ADDR(pname),
476 (u32)(unsigned long) value, (u32) valuelen);
477 }
478
479 static inline int prom_getproplen(phandle node, const char *pname)
480 {
481 return call_prom("getproplen", 2, 1, node, ADDR(pname));
482 }
483
484 static void add_string(char **str, const char *q)
485 {
486 char *p = *str;
487
488 while (*q)
489 *p++ = *q++;
490 *p++ = ' ';
491 *str = p;
492 }
493
494 static char *tohex(unsigned int x)
495 {
496 static char digits[] = "0123456789abcdef";
497 static char result[9];
498 int i;
499
500 result[8] = 0;
501 i = 8;
502 do {
503 --i;
504 result[i] = digits[x & 0xf];
505 x >>= 4;
506 } while (x != 0 && i > 0);
507 return &result[i];
508 }
509
510 static int __init prom_setprop(phandle node, const char *nodename,
511 const char *pname, void *value, size_t valuelen)
512 {
513 char cmd[256], *p;
514
515 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
516 return call_prom("setprop", 4, 1, node, ADDR(pname),
517 (u32)(unsigned long) value, (u32) valuelen);
518
519 /* gah... setprop doesn't work on longtrail, have to use interpret */
520 p = cmd;
521 add_string(&p, "dev");
522 add_string(&p, nodename);
523 add_string(&p, tohex((u32)(unsigned long) value));
524 add_string(&p, tohex(valuelen));
525 add_string(&p, tohex(ADDR(pname)));
526 add_string(&p, tohex(strlen(pname)));
527 add_string(&p, "property");
528 *p = 0;
529 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
530 }
531
532 /* We can't use the standard versions because of relocation headaches. */
533 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
534 || ('a' <= (c) && (c) <= 'f') \
535 || ('A' <= (c) && (c) <= 'F'))
536
537 #define isdigit(c) ('0' <= (c) && (c) <= '9')
538 #define islower(c) ('a' <= (c) && (c) <= 'z')
539 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
540
541 static unsigned long prom_strtoul(const char *cp, const char **endp)
542 {
543 unsigned long result = 0, base = 10, value;
544
545 if (*cp == '0') {
546 base = 8;
547 cp++;
548 if (toupper(*cp) == 'X') {
549 cp++;
550 base = 16;
551 }
552 }
553
554 while (isxdigit(*cp) &&
555 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
556 result = result * base + value;
557 cp++;
558 }
559
560 if (endp)
561 *endp = cp;
562
563 return result;
564 }
565
566 static unsigned long prom_memparse(const char *ptr, const char **retptr)
567 {
568 unsigned long ret = prom_strtoul(ptr, retptr);
569 int shift = 0;
570
571 /*
572 * We can't use a switch here because GCC *may* generate a
573 * jump table which won't work, because we're not running at
574 * the address we're linked at.
575 */
576 if ('G' == **retptr || 'g' == **retptr)
577 shift = 30;
578
579 if ('M' == **retptr || 'm' == **retptr)
580 shift = 20;
581
582 if ('K' == **retptr || 'k' == **retptr)
583 shift = 10;
584
585 if (shift) {
586 ret <<= shift;
587 (*retptr)++;
588 }
589
590 return ret;
591 }
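/*
 * Worked example (editor's addition): prom_memparse("512M", &p) parses the
 * number with prom_strtoul() and applies the K/M/G suffix as a shift, so it
 * returns 512 << 20 == 0x20000000 and leaves p pointing just past the 'M'.
 * A leading "0x" selects base 16 and a leading "0" selects base 8.
 */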
592
593 /*
594 * Early parsing of the command line passed to the kernel, used for
595 * "mem=x" and the options that affect the iommu
596 */
597 static void __init early_cmdline_parse(void)
598 {
599 const char *opt;
600
601 char *p;
602 int l = 0;
603
604 prom_cmd_line[0] = 0;
605 p = prom_cmd_line;
606 if ((long)prom.chosen > 0)
607 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
608 #ifdef CONFIG_CMDLINE
609 if (l <= 0 || p[0] == '\0') /* dbl check */
610 strlcpy(prom_cmd_line,
611 CONFIG_CMDLINE, sizeof(prom_cmd_line));
612 #endif /* CONFIG_CMDLINE */
613 prom_printf("command line: %s\n", prom_cmd_line);
614
615 #ifdef CONFIG_PPC64
616 opt = strstr(prom_cmd_line, "iommu=");
617 if (opt) {
618 prom_printf("iommu opt is: %s\n", opt);
619 opt += 6;
620 while (*opt && *opt == ' ')
621 opt++;
622 if (!strncmp(opt, "off", 3))
623 prom_iommu_off = 1;
624 else if (!strncmp(opt, "force", 5))
625 prom_iommu_force_on = 1;
626 }
627 #endif
628 opt = strstr(prom_cmd_line, "mem=");
629 if (opt) {
630 opt += 4;
631 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
632 #ifdef CONFIG_PPC64
633 /* Align to 16 MB == size of ppc64 large page */
634 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
635 #endif
636 }
637
638 opt = strstr(prom_cmd_line, "disable_radix");
639 if (opt) {
640 prom_debug("Radix disabled from cmdline\n");
641 prom_radix_disable = true;
642 }
643 }
644
645 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
646 /*
647 * The architecture vector has an array of PVR mask/value pairs,
648 * followed by # option vectors - 1, followed by the option vectors.
649 *
650 * See prom.h for the definition of the bits specified in the
651 * architecture vector.
652 */
653
654 /* Firmware expects the value to be n - 1, where n is the # of vectors */
655 #define NUM_VECTORS(n) ((n) - 1)
656
657 /*
658 * Firmware expects 1 + n - 2, where n is the length of the option vector in
659 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
660 */
661 #define VECTOR_LENGTH(n) (1 + (n) - 2)
662
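/*
 * Example (editor's addition): struct option_vector1 below is 3 bytes when
 * packed, so .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)) is
 * 1 + 3 - 2 == 2, and .num_vectors = NUM_VECTORS(6) == 5 for the six option
 * vectors that follow.
 */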
663 struct option_vector1 {
664 u8 byte1;
665 u8 arch_versions;
666 u8 arch_versions3;
667 } __packed;
668
669 struct option_vector2 {
670 u8 byte1;
671 __be16 reserved;
672 __be32 real_base;
673 __be32 real_size;
674 __be32 virt_base;
675 __be32 virt_size;
676 __be32 load_base;
677 __be32 min_rma;
678 __be32 min_load;
679 u8 min_rma_percent;
680 u8 max_pft_size;
681 } __packed;
682
683 struct option_vector3 {
684 u8 byte1;
685 u8 byte2;
686 } __packed;
687
688 struct option_vector4 {
689 u8 byte1;
690 u8 min_vp_cap;
691 } __packed;
692
693 struct option_vector5 {
694 u8 byte1;
695 u8 byte2;
696 u8 byte3;
697 u8 cmo;
698 u8 associativity;
699 u8 bin_opts;
700 u8 micro_checkpoint;
701 u8 reserved0;
702 __be32 max_cpus;
703 __be16 papr_level;
704 __be16 reserved1;
705 u8 platform_facilities;
706 u8 reserved2;
707 __be16 reserved3;
708 u8 subprocessors;
709 u8 byte22;
710 u8 intarch;
711 u8 mmu;
712 u8 hash_ext;
713 u8 radix_ext;
714 } __packed;
715
716 struct option_vector6 {
717 u8 reserved;
718 u8 secondary_pteg;
719 u8 os_name;
720 } __packed;
721
722 struct ibm_arch_vec {
723 struct { u32 mask, val; } pvrs[12];
724
725 u8 num_vectors;
726
727 u8 vec1_len;
728 struct option_vector1 vec1;
729
730 u8 vec2_len;
731 struct option_vector2 vec2;
732
733 u8 vec3_len;
734 struct option_vector3 vec3;
735
736 u8 vec4_len;
737 struct option_vector4 vec4;
738
739 u8 vec5_len;
740 struct option_vector5 vec5;
741
742 u8 vec6_len;
743 struct option_vector6 vec6;
744 } __packed;
745
746 struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
747 .pvrs = {
748 {
749 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
750 .val = cpu_to_be32(0x003a0000),
751 },
752 {
753 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
754 .val = cpu_to_be32(0x003e0000),
755 },
756 {
757 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
758 .val = cpu_to_be32(0x003f0000),
759 },
760 {
761 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
762 .val = cpu_to_be32(0x004b0000),
763 },
764 {
765 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
766 .val = cpu_to_be32(0x004c0000),
767 },
768 {
769 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
770 .val = cpu_to_be32(0x004d0000),
771 },
772 {
773 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
774 .val = cpu_to_be32(0x004e0000),
775 },
776 {
777 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
778 .val = cpu_to_be32(0x0f000005),
779 },
780 {
781 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
782 .val = cpu_to_be32(0x0f000004),
783 },
784 {
785 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
786 .val = cpu_to_be32(0x0f000003),
787 },
788 {
789 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
790 .val = cpu_to_be32(0x0f000002),
791 },
792 {
793 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
794 .val = cpu_to_be32(0x0f000001),
795 },
796 },
797
798 .num_vectors = NUM_VECTORS(6),
799
800 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
801 .vec1 = {
802 .byte1 = 0,
803 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
804 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
805 .arch_versions3 = OV1_PPC_3_00,
806 },
807
808 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
809 /* option vector 2: Open Firmware options supported */
810 .vec2 = {
811 .byte1 = OV2_REAL_MODE,
812 .reserved = 0,
813 .real_base = cpu_to_be32(0xffffffff),
814 .real_size = cpu_to_be32(0xffffffff),
815 .virt_base = cpu_to_be32(0xffffffff),
816 .virt_size = cpu_to_be32(0xffffffff),
817 .load_base = cpu_to_be32(0xffffffff),
818 .min_rma = cpu_to_be32(256), /* 256MB min RMA */
819 .min_load = cpu_to_be32(0xffffffff), /* full client load */
820 .min_rma_percent = 0, /* min RMA percentage of total RAM */
821 .max_pft_size = 48, /* max log_2(hash table size) */
822 },
823
824 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
825 /* option vector 3: processor options supported */
826 .vec3 = {
827 .byte1 = 0, /* don't ignore, don't halt */
828 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
829 },
830
831 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
832 /* option vector 4: IBM PAPR implementation */
833 .vec4 = {
834 .byte1 = 0, /* don't halt */
835 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
836 },
837
838 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
839 /* option vector 5: PAPR/OF options */
840 .vec5 = {
841 .byte1 = 0, /* don't ignore, don't halt */
842 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
843 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
844 #ifdef CONFIG_PCI_MSI
845 /* PCIe/MSI support. Without MSI full PCIe is not supported */
846 OV5_FEAT(OV5_MSI),
847 #else
848 0,
849 #endif
850 .byte3 = 0,
851 .cmo =
852 #ifdef CONFIG_PPC_SMLPAR
853 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
854 #else
855 0,
856 #endif
857 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
858 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
859 .micro_checkpoint = 0,
860 .reserved0 = 0,
861 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
862 .papr_level = 0,
863 .reserved1 = 0,
864 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
865 .reserved2 = 0,
866 .reserved3 = 0,
867 .subprocessors = 1,
868 .intarch = 0,
869 .mmu = 0,
870 .hash_ext = 0,
871 .radix_ext = 0,
872 },
873
874 /* option vector 6: IBM PAPR hints */
875 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
876 .vec6 = {
877 .reserved = 0,
878 .secondary_pteg = 0,
879 .os_name = OV6_LINUX,
880 },
881 };
882
883 /* Old method - ELF header with PT_NOTE sections only works on BE */
884 #ifdef __BIG_ENDIAN__
885 static struct fake_elf {
886 Elf32_Ehdr elfhdr;
887 Elf32_Phdr phdr[2];
888 struct chrpnote {
889 u32 namesz;
890 u32 descsz;
891 u32 type;
892 char name[8]; /* "PowerPC" */
893 struct chrpdesc {
894 u32 real_mode;
895 u32 real_base;
896 u32 real_size;
897 u32 virt_base;
898 u32 virt_size;
899 u32 load_base;
900 } chrpdesc;
901 } chrpnote;
902 struct rpanote {
903 u32 namesz;
904 u32 descsz;
905 u32 type;
906 char name[24]; /* "IBM,RPA-Client-Config" */
907 struct rpadesc {
908 u32 lpar_affinity;
909 u32 min_rmo_size;
910 u32 min_rmo_percent;
911 u32 max_pft_size;
912 u32 splpar;
913 u32 min_load;
914 u32 new_mem_def;
915 u32 ignore_me;
916 } rpadesc;
917 } rpanote;
918 } fake_elf = {
919 .elfhdr = {
920 .e_ident = { 0x7f, 'E', 'L', 'F',
921 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
922 .e_type = ET_EXEC, /* yeah right */
923 .e_machine = EM_PPC,
924 .e_version = EV_CURRENT,
925 .e_phoff = offsetof(struct fake_elf, phdr),
926 .e_phentsize = sizeof(Elf32_Phdr),
927 .e_phnum = 2
928 },
929 .phdr = {
930 [0] = {
931 .p_type = PT_NOTE,
932 .p_offset = offsetof(struct fake_elf, chrpnote),
933 .p_filesz = sizeof(struct chrpnote)
934 }, [1] = {
935 .p_type = PT_NOTE,
936 .p_offset = offsetof(struct fake_elf, rpanote),
937 .p_filesz = sizeof(struct rpanote)
938 }
939 },
940 .chrpnote = {
941 .namesz = sizeof("PowerPC"),
942 .descsz = sizeof(struct chrpdesc),
943 .type = 0x1275,
944 .name = "PowerPC",
945 .chrpdesc = {
946 .real_mode = ~0U, /* ~0 means "don't care" */
947 .real_base = ~0U,
948 .real_size = ~0U,
949 .virt_base = ~0U,
950 .virt_size = ~0U,
951 .load_base = ~0U
952 },
953 },
954 .rpanote = {
955 .namesz = sizeof("IBM,RPA-Client-Config"),
956 .descsz = sizeof(struct rpadesc),
957 .type = 0x12759999,
958 .name = "IBM,RPA-Client-Config",
959 .rpadesc = {
960 .lpar_affinity = 0,
961 .min_rmo_size = 64, /* in megabytes */
962 .min_rmo_percent = 0,
963 .max_pft_size = 48, /* 2^48 bytes max PFT size */
964 .splpar = 1,
965 .min_load = ~0U,
966 .new_mem_def = 0
967 }
968 }
969 };
970 #endif /* __BIG_ENDIAN__ */
971
972 static int __init prom_count_smt_threads(void)
973 {
974 phandle node;
975 char type[64];
976 unsigned int plen;
977
978 	/* Pick up the first CPU node we can find */
979 for (node = 0; prom_next_node(&node); ) {
980 type[0] = 0;
981 prom_getprop(node, "device_type", type, sizeof(type));
982
983 if (strcmp(type, "cpu"))
984 continue;
985 /*
986 * There is an entry for each smt thread, each entry being
987 * 4 bytes long. All cpus should have the same number of
988 * smt threads, so return after finding the first.
989 */
990 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
991 if (plen == PROM_ERROR)
992 break;
993 plen >>= 2;
994 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
995
996 /* Sanity check */
997 if (plen < 1 || plen > 64) {
998 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
999 (unsigned long)plen);
1000 return 1;
1001 }
1002 return plen;
1003 }
1004 prom_debug("No threads found, assuming 1 per core\n");
1005
1006 return 1;
1007
1008 }
1009
1010 static void __init prom_parse_mmu_model(u8 val,
1011 struct platform_support *support)
1012 {
1013 switch (val) {
1014 case OV5_FEAT(OV5_MMU_DYNAMIC):
1015 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1016 prom_debug("MMU - either supported\n");
1017 support->radix_mmu = !prom_radix_disable;
1018 support->hash_mmu = true;
1019 break;
1020 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1021 prom_debug("MMU - radix only\n");
1022 if (prom_radix_disable) {
1023 /*
1024 * If we __have__ to do radix, we're better off ignoring
1025 * the command line rather than not booting.
1026 */
1027 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1028 }
1029 support->radix_mmu = true;
1030 break;
1031 case OV5_FEAT(OV5_MMU_HASH):
1032 prom_debug("MMU - hash only\n");
1033 support->hash_mmu = true;
1034 break;
1035 default:
1036 prom_debug("Unknown mmu support option: 0x%x\n", val);
1037 break;
1038 }
1039 }
1040
1041 static void __init prom_parse_platform_support(u8 index, u8 val,
1042 struct platform_support *support)
1043 {
1044 switch (index) {
1045 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1046 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1047 break;
1048 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1049 if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
1050 prom_debug("Radix - GTSE supported\n");
1051 support->radix_gtse = true;
1052 }
1053 break;
1054 }
1055 }
1056
1057 static void __init prom_check_platform_support(void)
1058 {
1059 struct platform_support supported = {
1060 .hash_mmu = false,
1061 .radix_mmu = false,
1062 .radix_gtse = false
1063 };
1064 int prop_len = prom_getproplen(prom.chosen,
1065 "ibm,arch-vec-5-platform-support");
1066 if (prop_len > 1) {
1067 int i;
1068 u8 vec[prop_len];
1069 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1070 prop_len);
1071 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1072 &vec, sizeof(vec));
1073 for (i = 0; i < prop_len; i += 2) {
1074 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1075 , vec[i]
1076 , vec[i + 1]);
1077 prom_parse_platform_support(vec[i], vec[i + 1],
1078 &supported);
1079 }
1080 }
1081
1082 if (supported.radix_mmu && supported.radix_gtse) {
1083 /* Radix preferred - but we require GTSE for now */
1084 prom_debug("Asking for radix with GTSE\n");
1085 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1086 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
1087 } else if (supported.hash_mmu) {
1088 /* Default to hash mmu (if we can) */
1089 prom_debug("Asking for hash\n");
1090 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1091 } else {
1092 /* We're probably on a legacy hypervisor */
1093 prom_debug("Assuming legacy hash support\n");
1094 }
1095 }
1096
1097 static void __init prom_send_capabilities(void)
1098 {
1099 ihandle root;
1100 prom_arg_t ret;
1101 u32 cores;
1102
1103 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1104 prom_check_platform_support();
1105
1106 root = call_prom("open", 1, 1, ADDR("/"));
1107 if (root != 0) {
1108 /* We need to tell the FW about the number of cores we support.
1109 *
1110 * To do that, we count the number of threads on the first core
1111 * (we assume this is the same for all cores) and use it to
1112 * divide NR_CPUS.
1113 */
1114
1115 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1116 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
1117 cores, NR_CPUS);
1118
1119 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1120
1121 /* try calling the ibm,client-architecture-support method */
1122 prom_printf("Calling ibm,client-architecture-support...");
1123 if (call_prom_ret("call-method", 3, 2, &ret,
1124 ADDR("ibm,client-architecture-support"),
1125 root,
1126 ADDR(&ibm_architecture_vec)) == 0) {
1127 /* the call exists... */
1128 if (ret)
1129 prom_printf("\nWARNING: ibm,client-architecture"
1130 "-support call FAILED!\n");
1131 call_prom("close", 1, 0, root);
1132 prom_printf(" done\n");
1133 return;
1134 }
1135 call_prom("close", 1, 0, root);
1136 prom_printf(" not implemented\n");
1137 }
1138
1139 #ifdef __BIG_ENDIAN__
1140 {
1141 ihandle elfloader;
1142
1143 /* no ibm,client-architecture-support call, try the old way */
1144 elfloader = call_prom("open", 1, 1,
1145 ADDR("/packages/elf-loader"));
1146 if (elfloader == 0) {
1147 prom_printf("couldn't open /packages/elf-loader\n");
1148 return;
1149 }
1150 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1151 elfloader, ADDR(&fake_elf));
1152 call_prom("close", 1, 0, elfloader);
1153 }
1154 #endif /* __BIG_ENDIAN__ */
1155 }
1156 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1157
1158 /*
1159 * Memory allocation strategy... our layout is normally:
1160 *
1161  * at 14MB or more we have vmlinux, then a gap and the initrd. In some
1162  * rare cases the initrd might end up being before the kernel though.
1163  * We assume this won't overwrite the final kernel at 0; we have no
1164  * provision to handle that in this version, but it should hopefully
1165  * never happen.
1166  *
1167  * alloc_top is set to the top of the RMO, and is eventually shrunk
1168  * down if the TCEs overlap.
1169 *
1170 * alloc_bottom is set to the top of kernel/initrd
1171 *
1172  * from there, allocations are done this way: rtas is allocated
1173  * topmost, and the device-tree is allocated from the bottom. We try
1174  * to grow the device-tree allocation as we progress. If we can't,
1175  * then we fail; we don't currently have a facility to restart
1176 * elsewhere, but that shouldn't be necessary.
1177 *
1178 * Note that calls to reserve_mem have to be done explicitly, memory
1179 * allocated with either alloc_up or alloc_down isn't automatically
1180 * reserved.
1181 */
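/*
 * Rough sketch of that strategy as used by the callers later in this file
 * (editor's addition; rtas_size and dt_chunk are illustrative names only):
 *
 *	base = alloc_down(rtas_size, PAGE_SIZE, 0);	// carve from the top of the RMO
 *	reserve_mem(base, rtas_size);			// reservations are explicit
 *	dt   = alloc_up(dt_chunk, PAGE_SIZE);		// device-tree grows from the bottom
 */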
1182
1183
1184 /*
1185 * Allocates memory in the RMO upward from the kernel/initrd
1186 *
1187  * When align is 0, this is a special case: it means to allocate in place
1188 * at the current location of alloc_bottom or fail (that is basically
1189 * extending the previous allocation). Used for the device-tree flattening
1190 */
1191 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1192 {
1193 unsigned long base = alloc_bottom;
1194 unsigned long addr = 0;
1195
1196 if (align)
1197 base = _ALIGN_UP(base, align);
1198 prom_debug("alloc_up(%x, %x)\n", size, align);
1199 if (ram_top == 0)
1200 prom_panic("alloc_up() called with mem not initialized\n");
1201
1202 if (align)
1203 base = _ALIGN_UP(alloc_bottom, align);
1204 else
1205 base = alloc_bottom;
1206
1207 for(; (base + size) <= alloc_top;
1208 base = _ALIGN_UP(base + 0x100000, align)) {
1209 prom_debug(" trying: 0x%x\n\r", base);
1210 addr = (unsigned long)prom_claim(base, size, 0);
1211 if (addr != PROM_ERROR && addr != 0)
1212 break;
1213 addr = 0;
1214 if (align == 0)
1215 break;
1216 }
1217 if (addr == 0)
1218 return 0;
1219 alloc_bottom = addr + size;
1220
1221 prom_debug(" -> %x\n", addr);
1222 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1223 prom_debug(" alloc_top : %x\n", alloc_top);
1224 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1225 prom_debug(" rmo_top : %x\n", rmo_top);
1226 prom_debug(" ram_top : %x\n", ram_top);
1227
1228 return addr;
1229 }
1230
1231 /*
1232 * Allocates memory downward, either from top of RMO, or if highmem
1233 * is set, from the top of RAM. Note that this one doesn't handle
1234 * failures. It does claim memory if highmem is not set.
1235 */
1236 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1237 int highmem)
1238 {
1239 unsigned long base, addr = 0;
1240
1241 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1242 highmem ? "(high)" : "(low)");
1243 if (ram_top == 0)
1244 prom_panic("alloc_down() called with mem not initialized\n");
1245
1246 if (highmem) {
1247 /* Carve out storage for the TCE table. */
1248 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1249 if (addr <= alloc_bottom)
1250 return 0;
1251 /* Will we bump into the RMO ? If yes, check out that we
1252 * didn't overlap existing allocations there, if we did,
1253 * we are dead, we must be the first in town !
1254 */
1255 if (addr < rmo_top) {
1256 /* Good, we are first */
1257 if (alloc_top == rmo_top)
1258 alloc_top = rmo_top = addr;
1259 else
1260 return 0;
1261 }
1262 alloc_top_high = addr;
1263 goto bail;
1264 }
1265
1266 base = _ALIGN_DOWN(alloc_top - size, align);
1267 for (; base > alloc_bottom;
1268 base = _ALIGN_DOWN(base - 0x100000, align)) {
1269 prom_debug(" trying: 0x%x\n\r", base);
1270 addr = (unsigned long)prom_claim(base, size, 0);
1271 if (addr != PROM_ERROR && addr != 0)
1272 break;
1273 addr = 0;
1274 }
1275 if (addr == 0)
1276 return 0;
1277 alloc_top = addr;
1278
1279 bail:
1280 prom_debug(" -> %x\n", addr);
1281 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1282 prom_debug(" alloc_top : %x\n", alloc_top);
1283 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1284 prom_debug(" rmo_top : %x\n", rmo_top);
1285 prom_debug(" ram_top : %x\n", ram_top);
1286
1287 return addr;
1288 }
1289
1290 /*
1291 * Parse a "reg" cell
1292 */
1293 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1294 {
1295 cell_t *p = *cellp;
1296 unsigned long r = 0;
1297
1298 /* Ignore more than 2 cells */
1299 while (s > sizeof(unsigned long) / 4) {
1300 p++;
1301 s--;
1302 }
1303 r = be32_to_cpu(*p++);
1304 #ifdef CONFIG_PPC64
1305 if (s > 1) {
1306 r <<= 32;
1307 r |= be32_to_cpu(*(p++));
1308 }
1309 #endif
1310 *cellp = p;
1311 return r;
1312 }
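/*
 * Example (editor's addition): with #address-cells == 2 on ppc64, the two
 * big-endian cells { 0x00000001, 0x20000000 } are combined by
 * prom_next_cell(2, &p) into the single value 0x120000000.
 */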
1313
1314 /*
1315 * Very dumb function for adding to the memory reserve list, but
1316 * we don't need anything smarter at this point
1317 *
1318 * XXX Eventually check for collisions. They should NEVER happen.
1319 * If problems seem to show up, it would be a good start to track
1320 * them down.
1321 */
1322 static void __init reserve_mem(u64 base, u64 size)
1323 {
1324 u64 top = base + size;
1325 unsigned long cnt = mem_reserve_cnt;
1326
1327 if (size == 0)
1328 return;
1329
1330 /* We need to always keep one empty entry so that we
1331 * have our terminator with "size" set to 0 since we are
1332 * dumb and just copy this entire array to the boot params
1333 */
1334 base = _ALIGN_DOWN(base, PAGE_SIZE);
1335 top = _ALIGN_UP(top, PAGE_SIZE);
1336 size = top - base;
1337
1338 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1339 prom_panic("Memory reserve map exhausted !\n");
1340 mem_reserve_map[cnt].base = cpu_to_be64(base);
1341 mem_reserve_map[cnt].size = cpu_to_be64(size);
1342 mem_reserve_cnt = cnt + 1;
1343 }
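/*
 * Worked example (editor's addition, assuming a 4 KB PAGE_SIZE):
 * reserve_mem(0x3fff800, 0x1000) rounds the base down to 0x3fff000 and the
 * top up to 0x4001000, so 0x2000 bytes land in mem_reserve_map even though
 * only 0x1000 were requested.
 */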
1344
1345 /*
1346  * Initialize the memory allocation mechanism: parse the "memory" nodes to
1347  * obtain the top of memory and the RMO, and use them to set up our local allocator
1348 */
1349 static void __init prom_init_mem(void)
1350 {
1351 phandle node;
1352 char *path, type[64];
1353 unsigned int plen;
1354 cell_t *p, *endp;
1355 __be32 val;
1356 u32 rac, rsc;
1357
1358 /*
1359 * We iterate the memory nodes to find
1360 * 1) top of RMO (first node)
1361 * 2) top of memory
1362 */
1363 val = cpu_to_be32(2);
1364 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1365 rac = be32_to_cpu(val);
1366 val = cpu_to_be32(1);
1367 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1368 rsc = be32_to_cpu(val);
1369 prom_debug("root_addr_cells: %x\n", rac);
1370 prom_debug("root_size_cells: %x\n", rsc);
1371
1372 prom_debug("scanning memory:\n");
1373 path = prom_scratch;
1374
1375 for (node = 0; prom_next_node(&node); ) {
1376 type[0] = 0;
1377 prom_getprop(node, "device_type", type, sizeof(type));
1378
1379 if (type[0] == 0) {
1380 /*
1381 * CHRP Longtrail machines have no device_type
1382 * on the memory node, so check the name instead...
1383 */
1384 prom_getprop(node, "name", type, sizeof(type));
1385 }
1386 if (strcmp(type, "memory"))
1387 continue;
1388
1389 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1390 if (plen > sizeof(regbuf)) {
1391 prom_printf("memory node too large for buffer !\n");
1392 plen = sizeof(regbuf);
1393 }
1394 p = regbuf;
1395 endp = p + (plen / sizeof(cell_t));
1396
1397 #ifdef DEBUG_PROM
1398 memset(path, 0, PROM_SCRATCH_SIZE);
1399 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1400 prom_debug(" node %s :\n", path);
1401 #endif /* DEBUG_PROM */
1402
1403 while ((endp - p) >= (rac + rsc)) {
1404 unsigned long base, size;
1405
1406 base = prom_next_cell(rac, &p);
1407 size = prom_next_cell(rsc, &p);
1408
1409 if (size == 0)
1410 continue;
1411 prom_debug(" %x %x\n", base, size);
1412 if (base == 0 && (of_platform & PLATFORM_LPAR))
1413 rmo_top = size;
1414 if ((base + size) > ram_top)
1415 ram_top = base + size;
1416 }
1417 }
1418
1419 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1420
1421 /*
1422 * If prom_memory_limit is set we reduce the upper limits *except* for
1423 * alloc_top_high. This must be the real top of RAM so we can put
1424 * TCE's up there.
1425 */
1426
1427 alloc_top_high = ram_top;
1428
1429 if (prom_memory_limit) {
1430 if (prom_memory_limit <= alloc_bottom) {
1431 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1432 prom_memory_limit);
1433 prom_memory_limit = 0;
1434 } else if (prom_memory_limit >= ram_top) {
1435 prom_printf("Ignoring mem=%x >= ram_top.\n",
1436 prom_memory_limit);
1437 prom_memory_limit = 0;
1438 } else {
1439 ram_top = prom_memory_limit;
1440 rmo_top = min(rmo_top, prom_memory_limit);
1441 }
1442 }
1443
1444 /*
1445 	 * Set up our top alloc point, that is the top of the RMO or the top of
1446 * segment 0 when running non-LPAR.
1447 * Some RS64 machines have buggy firmware where claims up at
1448 * 1GB fail. Cap at 768MB as a workaround.
1449 * Since 768MB is plenty of room, and we need to cap to something
1450 * reasonable on 32-bit, cap at 768MB on all machines.
1451 */
1452 if (!rmo_top)
1453 rmo_top = ram_top;
1454 rmo_top = min(0x30000000ul, rmo_top);
1455 alloc_top = rmo_top;
1456 alloc_top_high = ram_top;
1457
1458 /*
1459 * Check if we have an initrd after the kernel but still inside
1460 	 * the RMO. If we do, move our bottom point to after it.
1461 */
1462 if (prom_initrd_start &&
1463 prom_initrd_start < rmo_top &&
1464 prom_initrd_end > alloc_bottom)
1465 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1466
1467 prom_printf("memory layout at init:\n");
1468 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1469 prom_printf(" alloc_bottom : %x\n", alloc_bottom);
1470 prom_printf(" alloc_top : %x\n", alloc_top);
1471 prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
1472 prom_printf(" rmo_top : %x\n", rmo_top);
1473 prom_printf(" ram_top : %x\n", ram_top);
1474 }
1475
1476 static void __init prom_close_stdin(void)
1477 {
1478 __be32 val;
1479 ihandle stdin;
1480
1481 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1482 stdin = be32_to_cpu(val);
1483 call_prom("close", 1, 0, stdin);
1484 }
1485 }
1486
1487 #ifdef CONFIG_PPC_POWERNV
1488
1489 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1490 static u64 __initdata prom_opal_base;
1491 static u64 __initdata prom_opal_entry;
1492 #endif
1493
1494 /*
1495 * Allocate room for and instantiate OPAL
1496 */
1497 static void __init prom_instantiate_opal(void)
1498 {
1499 phandle opal_node;
1500 ihandle opal_inst;
1501 u64 base, entry;
1502 u64 size = 0, align = 0x10000;
1503 __be64 val64;
1504 u32 rets[2];
1505
1506 prom_debug("prom_instantiate_opal: start...\n");
1507
1508 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1509 prom_debug("opal_node: %x\n", opal_node);
1510 if (!PHANDLE_VALID(opal_node))
1511 return;
1512
1513 val64 = 0;
1514 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1515 size = be64_to_cpu(val64);
1516 if (size == 0)
1517 return;
1518 val64 = 0;
1519 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1520 align = be64_to_cpu(val64);
1521
1522 base = alloc_down(size, align, 0);
1523 if (base == 0) {
1524 prom_printf("OPAL allocation failed !\n");
1525 return;
1526 }
1527
1528 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1529 if (!IHANDLE_VALID(opal_inst)) {
1530 prom_printf("opening opal package failed (%x)\n", opal_inst);
1531 return;
1532 }
1533
1534 prom_printf("instantiating opal at 0x%x...", base);
1535
1536 if (call_prom_ret("call-method", 4, 3, rets,
1537 ADDR("load-opal-runtime"),
1538 opal_inst,
1539 base >> 32, base & 0xffffffff) != 0
1540 || (rets[0] == 0 && rets[1] == 0)) {
1541 prom_printf(" failed\n");
1542 return;
1543 }
1544 entry = (((u64)rets[0]) << 32) | rets[1];
1545
1546 prom_printf(" done\n");
1547
1548 reserve_mem(base, size);
1549
1550 prom_debug("opal base = 0x%x\n", base);
1551 prom_debug("opal align = 0x%x\n", align);
1552 prom_debug("opal entry = 0x%x\n", entry);
1553 prom_debug("opal size = 0x%x\n", (long)size);
1554
1555 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1556 &base, sizeof(base));
1557 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1558 &entry, sizeof(entry));
1559
1560 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1561 prom_opal_base = base;
1562 prom_opal_entry = entry;
1563 #endif
1564 prom_debug("prom_instantiate_opal: end...\n");
1565 }
1566
1567 #endif /* CONFIG_PPC_POWERNV */
1568
1569 /*
1570 * Allocate room for and instantiate RTAS
1571 */
1572 static void __init prom_instantiate_rtas(void)
1573 {
1574 phandle rtas_node;
1575 ihandle rtas_inst;
1576 u32 base, entry = 0;
1577 __be32 val;
1578 u32 size = 0;
1579
1580 prom_debug("prom_instantiate_rtas: start...\n");
1581
1582 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1583 prom_debug("rtas_node: %x\n", rtas_node);
1584 if (!PHANDLE_VALID(rtas_node))
1585 return;
1586
1587 val = 0;
1588 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1589 size = be32_to_cpu(val);
1590 if (size == 0)
1591 return;
1592
1593 base = alloc_down(size, PAGE_SIZE, 0);
1594 if (base == 0)
1595 prom_panic("Could not allocate memory for RTAS\n");
1596
1597 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1598 if (!IHANDLE_VALID(rtas_inst)) {
1599 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1600 return;
1601 }
1602
1603 prom_printf("instantiating rtas at 0x%x...", base);
1604
1605 if (call_prom_ret("call-method", 3, 2, &entry,
1606 ADDR("instantiate-rtas"),
1607 rtas_inst, base) != 0
1608 || entry == 0) {
1609 prom_printf(" failed\n");
1610 return;
1611 }
1612 prom_printf(" done\n");
1613
1614 reserve_mem(base, size);
1615
1616 val = cpu_to_be32(base);
1617 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1618 &val, sizeof(val));
1619 val = cpu_to_be32(entry);
1620 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1621 &val, sizeof(val));
1622
1623 /* Check if it supports "query-cpu-stopped-state" */
1624 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1625 &val, sizeof(val)) != PROM_ERROR)
1626 rtas_has_query_cpu_stopped = true;
1627
1628 prom_debug("rtas base = 0x%x\n", base);
1629 prom_debug("rtas entry = 0x%x\n", entry);
1630 prom_debug("rtas size = 0x%x\n", (long)size);
1631
1632 prom_debug("prom_instantiate_rtas: end...\n");
1633 }
1634
1635 #ifdef CONFIG_PPC64
1636 /*
1637 * Allocate room for and instantiate Stored Measurement Log (SML)
1638 */
1639 static void __init prom_instantiate_sml(void)
1640 {
1641 phandle ibmvtpm_node;
1642 ihandle ibmvtpm_inst;
1643 u32 entry = 0, size = 0, succ = 0;
1644 u64 base;
1645 __be32 val;
1646
1647 prom_debug("prom_instantiate_sml: start...\n");
1648
1649 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1650 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1651 if (!PHANDLE_VALID(ibmvtpm_node))
1652 return;
1653
1654 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1655 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1656 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1657 return;
1658 }
1659
1660 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1661 &val, sizeof(val)) != PROM_ERROR) {
1662 if (call_prom_ret("call-method", 2, 2, &succ,
1663 ADDR("reformat-sml-to-efi-alignment"),
1664 ibmvtpm_inst) != 0 || succ == 0) {
1665 prom_printf("Reformat SML to EFI alignment failed\n");
1666 return;
1667 }
1668
1669 if (call_prom_ret("call-method", 2, 2, &size,
1670 ADDR("sml-get-allocated-size"),
1671 ibmvtpm_inst) != 0 || size == 0) {
1672 prom_printf("SML get allocated size failed\n");
1673 return;
1674 }
1675 } else {
1676 if (call_prom_ret("call-method", 2, 2, &size,
1677 ADDR("sml-get-handover-size"),
1678 ibmvtpm_inst) != 0 || size == 0) {
1679 prom_printf("SML get handover size failed\n");
1680 return;
1681 }
1682 }
1683
1684 base = alloc_down(size, PAGE_SIZE, 0);
1685 if (base == 0)
1686 prom_panic("Could not allocate memory for sml\n");
1687
1688 prom_printf("instantiating sml at 0x%x...", base);
1689
1690 memset((void *)base, 0, size);
1691
1692 if (call_prom_ret("call-method", 4, 2, &entry,
1693 ADDR("sml-handover"),
1694 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1695 prom_printf("SML handover failed\n");
1696 return;
1697 }
1698 prom_printf(" done\n");
1699
1700 reserve_mem(base, size);
1701
1702 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1703 &base, sizeof(base));
1704 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1705 &size, sizeof(size));
1706
1707 prom_debug("sml base = 0x%x\n", base);
1708 prom_debug("sml size = 0x%x\n", (long)size);
1709
1710 prom_debug("prom_instantiate_sml: end...\n");
1711 }
1712
1713 /*
1714 * Allocate room for and initialize TCE tables
1715 */
1716 #ifdef __BIG_ENDIAN__
1717 static void __init prom_initialize_tce_table(void)
1718 {
1719 phandle node;
1720 ihandle phb_node;
1721 char compatible[64], type[64], model[64];
1722 char *path = prom_scratch;
1723 u64 base, align;
1724 u32 minalign, minsize;
1725 u64 tce_entry, *tce_entryp;
1726 u64 local_alloc_top, local_alloc_bottom;
1727 u64 i;
1728
1729 if (prom_iommu_off)
1730 return;
1731
1732 prom_debug("starting prom_initialize_tce_table\n");
1733
1734 /* Cache current top of allocs so we reserve a single block */
1735 local_alloc_top = alloc_top_high;
1736 local_alloc_bottom = local_alloc_top;
1737
1738 /* Search all nodes looking for PHBs. */
1739 for (node = 0; prom_next_node(&node); ) {
1740 compatible[0] = 0;
1741 type[0] = 0;
1742 model[0] = 0;
1743 prom_getprop(node, "compatible",
1744 compatible, sizeof(compatible));
1745 prom_getprop(node, "device_type", type, sizeof(type));
1746 prom_getprop(node, "model", model, sizeof(model));
1747
1748 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1749 continue;
1750
1751 /* Keep the old logic intact to avoid regression. */
1752 if (compatible[0] != 0) {
1753 if ((strstr(compatible, "python") == NULL) &&
1754 (strstr(compatible, "Speedwagon") == NULL) &&
1755 (strstr(compatible, "Winnipeg") == NULL))
1756 continue;
1757 } else if (model[0] != 0) {
1758 if ((strstr(model, "ython") == NULL) &&
1759 (strstr(model, "peedwagon") == NULL) &&
1760 (strstr(model, "innipeg") == NULL))
1761 continue;
1762 }
1763
1764 if (prom_getprop(node, "tce-table-minalign", &minalign,
1765 sizeof(minalign)) == PROM_ERROR)
1766 minalign = 0;
1767 if (prom_getprop(node, "tce-table-minsize", &minsize,
1768 sizeof(minsize)) == PROM_ERROR)
1769 minsize = 4UL << 20;
1770
1771 /*
1772 * Even though we read what OF wants, we just set the table
1773 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1774 * By doing this, we avoid the pitfalls of trying to DMA to
1775 * MMIO space and the DMA alias hole.
1776 *
1777 * On POWER4, firmware sets the TCE region by assuming
1778 * each TCE table is 8MB. Using this memory for anything
1779 * else will impact performance, so we always allocate 8MB.
1780 * Anton
1781 */
1782 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1783 minsize = 8UL << 20;
1784 else
1785 minsize = 4UL << 20;
1786
1787 /* Align to the greater of the align or size */
1788 align = max(minalign, minsize);
1789 base = alloc_down(minsize, align, 1);
1790 if (base == 0)
1791 prom_panic("ERROR, cannot find space for TCE table.\n");
1792 if (base < local_alloc_bottom)
1793 local_alloc_bottom = base;
1794
1795 /* It seems OF doesn't null-terminate the path :-( */
1796 memset(path, 0, PROM_SCRATCH_SIZE);
1797 /* Call OF to setup the TCE hardware */
1798 if (call_prom("package-to-path", 3, 1, node,
1799 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1800 prom_printf("package-to-path failed\n");
1801 }
1802
1803 /* Save away the TCE table attributes for later use. */
1804 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1805 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1806
1807 prom_debug("TCE table: %s\n", path);
1808 prom_debug("\tnode = 0x%x\n", node);
1809 prom_debug("\tbase = 0x%x\n", base);
1810 prom_debug("\tsize = 0x%x\n", minsize);
1811
1812 /* Initialize the table to have a one-to-one mapping
1813 * over the allocated size.
1814 */
1815 tce_entryp = (u64 *)base;
1816 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1817 tce_entry = (i << PAGE_SHIFT);
1818 tce_entry |= 0x3;
1819 *tce_entryp = tce_entry;
1820 }
1821
1822 prom_printf("opening PHB %s", path);
1823 phb_node = call_prom("open", 1, 1, path);
1824 if (phb_node == 0)
1825 prom_printf("... failed\n");
1826 else
1827 prom_printf("... done\n");
1828
1829 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1830 phb_node, -1, minsize,
1831 (u32) base, (u32) (base >> 32));
1832 call_prom("close", 1, 0, phb_node);
1833 }
1834
1835 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1836
1837 /* These are only really needed if there is a memory limit in
1838 	 * effect, but we don't know that here, so export them always. */
1839 prom_tce_alloc_start = local_alloc_bottom;
1840 prom_tce_alloc_end = local_alloc_top;
1841
1842 /* Flag the first invalid entry */
1843 prom_debug("ending prom_initialize_tce_table\n");
1844 }
1845 #endif /* __BIG_ENDIAN__ */
1846 #endif /* CONFIG_PPC64 */
1847
1848 /*
1849 * With CHRP SMP we need to use the OF to start the other processors.
1850 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1851 * so we have to put the processors into a holding pattern controlled
1852 * by the kernel (not OF) before we destroy the OF.
1853 *
1854 * This uses a chunk of low memory, puts some holding pattern
1855 * code there and sends the other processors off to there until
1856 * smp_boot_cpus tells them to do something. The holding pattern
1857 * checks that address until its cpu # is there, when it is that
1858 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1859 * of setting those values.
1860 *
1861 * We also use physical address 0x4 here to tell when a cpu
1862 * is in its holding pattern code.
1863 *
1864 * -- Cort
1865 */
1866 /*
1867 * We want to reference the copy of __secondary_hold_* in the
1868 * 0 - 0x100 address range
1869 */
1870 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1871
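/*
 * Editor's note: the handshake in prom_hold_cpus() below works like this:
 * the boot cpu clears *spinloop, sets *acknowledge to -1, asks OF to
 * "start-cpu" each secondary at __secondary_hold, then spins until the
 * secondary writes its cpu number into *acknowledge.
 */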
1872 static void __init prom_hold_cpus(void)
1873 {
1874 unsigned long i;
1875 phandle node;
1876 char type[64];
1877 unsigned long *spinloop
1878 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1879 unsigned long *acknowledge
1880 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1881 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1882
1883 /*
1884 * On pseries, if RTAS supports "query-cpu-stopped-state",
1885 	 * we skip this stage; the CPUs will be started by the
1886 * kernel using RTAS.
1887 */
1888 if ((of_platform == PLATFORM_PSERIES ||
1889 of_platform == PLATFORM_PSERIES_LPAR) &&
1890 rtas_has_query_cpu_stopped) {
1891 prom_printf("prom_hold_cpus: skipped\n");
1892 return;
1893 }
1894
1895 prom_debug("prom_hold_cpus: start...\n");
1896 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1897 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1898 prom_debug(" 1) acknowledge = 0x%x\n",
1899 (unsigned long)acknowledge);
1900 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1901 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1902
1903 /* Set the common spinloop variable, so all of the secondary cpus
1904 * will block when they are awakened from their OF spinloop.
1905 * This must occur for both SMP and non SMP kernels, since OF will
1906 * be trashed when we move the kernel.
1907 */
1908 *spinloop = 0;
1909
1910 /* look for cpus */
1911 for (node = 0; prom_next_node(&node); ) {
1912 unsigned int cpu_no;
1913 __be32 reg;
1914
1915 type[0] = 0;
1916 prom_getprop(node, "device_type", type, sizeof(type));
1917 if (strcmp(type, "cpu") != 0)
1918 continue;
1919
1920 /* Skip non-configured cpus. */
1921 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1922 if (strcmp(type, "okay") != 0)
1923 continue;
1924
1925 reg = cpu_to_be32(-1); /* make sparse happy */
1926 prom_getprop(node, "reg", &reg, sizeof(reg));
1927 cpu_no = be32_to_cpu(reg);
1928
1929 prom_debug("cpu hw idx = %lu\n", cpu_no);
1930
1931 /* Init the acknowledge var which will be reset by
1932 * the secondary cpu when it awakens from its OF
1933 * spinloop.
1934 */
1935 *acknowledge = (unsigned long)-1;
1936
1937 if (cpu_no != prom.cpu) {
1938 /* Primary Thread of non-boot cpu or any thread */
1939 prom_printf("starting cpu hw idx %lu... ", cpu_no);
1940 call_prom("start-cpu", 3, 0, node,
1941 secondary_hold, cpu_no);
1942
1943 for (i = 0; (i < 100000000) &&
1944 (*acknowledge == ((unsigned long)-1)); i++ )
1945 mb();
1946
1947 if (*acknowledge == cpu_no)
1948 prom_printf("done\n");
1949 else
1950 prom_printf("failed: %x\n", *acknowledge);
1951 }
1952 #ifdef CONFIG_SMP
1953 else
1954 prom_printf("boot cpu hw idx %lu\n", cpu_no);
1955 #endif /* CONFIG_SMP */
1956 }
1957
1958 prom_debug("prom_hold_cpus: end...\n");
1959 }
1960
1961
1962 static void __init prom_init_client_services(unsigned long pp)
1963 {
1964 /* Get a handle to the prom entry point before anything else */
1965 prom_entry = pp;
1966
1967 /* get a handle for the stdout device */
1968 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1969 if (!PHANDLE_VALID(prom.chosen))
1970 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1971
1972 /* get device tree root */
1973 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1974 if (!PHANDLE_VALID(prom.root))
1975 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1976
1977 prom.mmumap = 0;
1978 }
1979
1980 #ifdef CONFIG_PPC32
1981 /*
1982 * For really old powermacs, we need to map things we claim.
1983 * For that, we need the ihandle of the mmu.
1984 * Also, on the longtrail, we need to work around other bugs.
1985 */
1986 static void __init prom_find_mmu(void)
1987 {
1988 phandle oprom;
1989 char version[64];
1990
1991 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1992 if (!PHANDLE_VALID(oprom))
1993 return;
1994 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1995 return;
1996 version[sizeof(version) - 1] = 0;
1997 /* XXX might need to add other versions here */
1998 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1999 of_workarounds = OF_WA_CLAIM;
2000 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
2001 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2002 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2003 } else
2004 return;
2005 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2006 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2007 sizeof(prom.mmumap));
2008 prom.mmumap = be32_to_cpu(prom.mmumap);
2009 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2010 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2011 }
2012 #else
2013 #define prom_find_mmu()
2014 #endif
2015
2016 static void __init prom_init_stdout(void)
2017 {
2018 char *path = of_stdout_device;
2019 char type[16];
2020 phandle stdout_node;
2021 __be32 val;
2022
2023 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2024 prom_panic("cannot find stdout");
2025
2026 prom.stdout = be32_to_cpu(val);
2027
2028 /* Get the full OF pathname of the stdout device */
2029 memset(path, 0, 256);
2030 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2031 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2032 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2033 path, strlen(path) + 1);
2034
2035 /* instance-to-package fails on PA-Semi */
2036 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2037 if (stdout_node != PROM_ERROR) {
2038 val = cpu_to_be32(stdout_node);
2039 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
2040 &val, sizeof(val));
2041
2042 /* If it's a display, note it */
2043 memset(type, 0, sizeof(type));
2044 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2045 if (strcmp(type, "display") == 0)
2046 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2047 }
2048 }
2049
2050 static int __init prom_find_machine_type(void)
2051 {
2052 char compat[256];
2053 int len, i = 0;
2054 #ifdef CONFIG_PPC64
2055 phandle rtas;
2056 int x;
2057 #endif
2058
2059 /* Look for a PowerMac or a Cell */
2060 len = prom_getprop(prom.root, "compatible",
2061 compat, sizeof(compat)-1);
2062 if (len > 0) {
2063 compat[len] = 0;
2064 while (i < len) {
2065 char *p = &compat[i];
2066 int sl = strlen(p);
2067 if (sl == 0)
2068 break;
2069 if (strstr(p, "Power Macintosh") ||
2070 strstr(p, "MacRISC"))
2071 return PLATFORM_POWERMAC;
2072 #ifdef CONFIG_PPC64
2073 /* We must make sure we don't detect the IBM Cell
2074 * blades as pSeries due to some firmware issues,
2075 * so we do it here.
2076 */
2077 if (strstr(p, "IBM,CBEA") ||
2078 strstr(p, "IBM,CPBW-1.0"))
2079 return PLATFORM_GENERIC;
2080 #endif /* CONFIG_PPC64 */
2081 i += sl + 1;
2082 }
2083 }
2084 #ifdef CONFIG_PPC64
2085 /* Try to detect OPAL */
2086 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2087 return PLATFORM_OPAL;
2088
2089 /* Try to figure out if it's an IBM pSeries or any other
2090 * PAPR-compliant platform. We assume it is if:
2091 * - /device_type is "chrp" (please, do NOT use that for future
2092 * non-IBM designs!)
2093 * - it has /rtas
2094 */
2095 len = prom_getprop(prom.root, "device_type",
2096 compat, sizeof(compat)-1);
2097 if (len <= 0)
2098 return PLATFORM_GENERIC;
2099 if (strcmp(compat, "chrp"))
2100 return PLATFORM_GENERIC;
2101
2102 /* Default to pSeries. We need to know if we are running LPAR */
2103 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2104 if (!PHANDLE_VALID(rtas))
2105 return PLATFORM_GENERIC;
2106 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2107 if (x != PROM_ERROR) {
2108 prom_debug("Hypertas detected, assuming LPAR !\n");
2109 return PLATFORM_PSERIES_LPAR;
2110 }
2111 return PLATFORM_PSERIES;
2112 #else
2113 return PLATFORM_GENERIC;
2114 #endif
2115 }
2116
2117 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2118 {
2119 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2120 }
2121
2122 /*
2123 * If we have a display that we don't know how to drive,
2124 * we will want to try to execute OF's open method for it
2125 * later. However, OF will probably fall over if we do that
2126 * after we've taken over the MMU.
2127 * So we check whether we will need to open the display,
2128 * and if so, open it now.
2129 */
2130 static void __init prom_check_displays(void)
2131 {
2132 char type[16], *path;
2133 phandle node;
2134 ihandle ih;
2135 int i;
2136
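	/*
	 * Default colour table used to seed the display below: the classic
	 * 16-entry text-mode palette, stored as RGB triplets.
	 */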
2137 static unsigned char default_colors[] = {
2138 0x00, 0x00, 0x00,
2139 0x00, 0x00, 0xaa,
2140 0x00, 0xaa, 0x00,
2141 0x00, 0xaa, 0xaa,
2142 0xaa, 0x00, 0x00,
2143 0xaa, 0x00, 0xaa,
2144 0xaa, 0xaa, 0x00,
2145 0xaa, 0xaa, 0xaa,
2146 0x55, 0x55, 0x55,
2147 0x55, 0x55, 0xff,
2148 0x55, 0xff, 0x55,
2149 0x55, 0xff, 0xff,
2150 0xff, 0x55, 0x55,
2151 0xff, 0x55, 0xff,
2152 0xff, 0xff, 0x55,
2153 0xff, 0xff, 0xff
2154 };
2155 const unsigned char *clut;
2156
2157 prom_debug("Looking for displays\n");
2158 for (node = 0; prom_next_node(&node); ) {
2159 memset(type, 0, sizeof(type));
2160 prom_getprop(node, "device_type", type, sizeof(type));
2161 if (strcmp(type, "display") != 0)
2162 continue;
2163
2164 /* It seems OF doesn't null-terminate the path :-( */
2165 path = prom_scratch;
2166 memset(path, 0, PROM_SCRATCH_SIZE);
2167
2168 /*
2169 * leave some room at the end of the path for appending extra
2170 * arguments
2171 */
2172 if (call_prom("package-to-path", 3, 1, node, path,
2173 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2174 continue;
2175 prom_printf("found display : %s, opening... ", path);
2176
2177 ih = call_prom("open", 1, 1, path);
2178 if (ih == 0) {
2179 prom_printf("failed\n");
2180 continue;
2181 }
2182
2183 /* Success */
2184 prom_printf("done\n");
2185 prom_setprop(node, path, "linux,opened", NULL, 0);
2186
2187 /* Set up a usable color table when the appropriate
2188 * method is available. Should update this to use set-colors */
2189 clut = default_colors;
2190 for (i = 0; i < 16; i++, clut += 3)
2191 if (prom_set_color(ih, i, clut[0], clut[1],
2192 clut[2]) != 0)
2193 break;
2194
2195 #ifdef CONFIG_LOGO_LINUX_CLUT224
2196 clut = PTRRELOC(logo_linux_clut224.clut);
2197 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2198 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2199 clut[2]) != 0)
2200 break;
2201 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2202
2203 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2204 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2205 PROM_ERROR) {
2206 u32 width, height, pitch, addr;
2207
2208 prom_printf("Setting btext !\n");
2209 prom_getprop(node, "width", &width, 4);
2210 prom_getprop(node, "height", &height, 4);
2211 prom_getprop(node, "linebytes", &pitch, 4);
2212 prom_getprop(node, "address", &addr, 4);
2213 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2214 width, height, pitch, addr);
2215 btext_setup_display(width, height, 8, pitch, addr);
2216 }
2217 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2218 }
2219 }
2220
2221
2222 /* Return a pointer to 'needed' bytes, claiming another chunk if required. */
2223 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2224 unsigned long needed, unsigned long align)
2225 {
2226 void *ret;
2227
2228 *mem_start = _ALIGN(*mem_start, align);
2229 while ((*mem_start + needed) > *mem_end) {
2230 unsigned long room, chunk;
2231
2232 prom_debug("Chunk exhausted, claiming more at %x...\n",
2233 alloc_bottom);
2234 room = alloc_top - alloc_bottom;
2235 if (room > DEVTREE_CHUNK_SIZE)
2236 room = DEVTREE_CHUNK_SIZE;
2237 if (room < PAGE_SIZE)
2238 prom_panic("No memory for flatten_device_tree "
2239 "(no room)\n");
2240 chunk = alloc_up(room, 0);
2241 if (chunk == 0)
2242 prom_panic("No memory for flatten_device_tree "
2243 "(claim failed)\n");
2244 *mem_end = chunk + room;
2245 }
2246
2247 ret = (void *)*mem_start;
2248 *mem_start += needed;
2249
2250 return ret;
2251 }
2252
2253 #define dt_push_token(token, mem_start, mem_end) do { \
2254 void *room = make_room(mem_start, mem_end, 4, 4); \
2255 *(__be32 *)room = cpu_to_be32(token); \
2256 } while(0)
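/*
 * Illustrative only: scan_dt_build_struct() below uses this to emit each
 * property as
 *
 *     OF_DT_PROP, <value length>, <offset of the name in the strings block>,
 *     <value bytes, padded to a 4-byte boundary>
 *
 * and each node as OF_DT_BEGIN_NODE, <node unit name>, <properties>,
 * <children>, OF_DT_END_NODE, with a single OF_DT_END terminating the
 * whole structure block.
 */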
2257
2258 static unsigned long __init dt_find_string(char *str)
2259 {
2260 char *s, *os;
2261
2262 s = os = (char *)dt_string_start;
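	/*
	 * Skip the 4-byte hole reserved at dt_string_start (see
	 * flatten_device_tree()); offset 0 is therefore never a valid
	 * string, so a return value of 0 can safely mean "not found".
	 */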
2263 s += 4;
2264 while (s < (char *)dt_string_end) {
2265 if (strcmp(s, str) == 0)
2266 return s - os;
2267 s += strlen(s) + 1;
2268 }
2269 return 0;
2270 }
2271
2272 /*
2273 * The Open Firmware 1275 specification states property names must be 31
2274 * bytes or less; however, not all firmwares obey this. Make it 64 bytes to be safe.
2275 */
2276 #define MAX_PROPERTY_NAME 64
2277
2278 static void __init scan_dt_build_strings(phandle node,
2279 unsigned long *mem_start,
2280 unsigned long *mem_end)
2281 {
2282 char *prev_name, *namep, *sstart;
2283 unsigned long soff;
2284 phandle child;
2285
2286 sstart = (char *)dt_string_start;
2287
2288 /* get and store all property names */
2289 prev_name = "";
2290 for (;;) {
2291 /* 64 is max len of name including nul. */
2292 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2293 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2294 /* No more properties: unwind alloc */
2295 *mem_start = (unsigned long)namep;
2296 break;
2297 }
2298
2299 /* skip "name" */
2300 if (strcmp(namep, "name") == 0) {
2301 *mem_start = (unsigned long)namep;
2302 prev_name = "name";
2303 continue;
2304 }
2305 /* get/create string entry */
2306 soff = dt_find_string(namep);
2307 if (soff != 0) {
2308 *mem_start = (unsigned long)namep;
2309 namep = sstart + soff;
2310 } else {
2311 /* Trim off some if we can */
2312 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2313 dt_string_end = *mem_start;
2314 }
2315 prev_name = namep;
2316 }
2317
2318 /* do all our children */
2319 child = call_prom("child", 1, 1, node);
2320 while (child != 0) {
2321 scan_dt_build_strings(child, mem_start, mem_end);
2322 child = call_prom("peer", 1, 1, child);
2323 }
2324 }
2325
2326 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2327 unsigned long *mem_end)
2328 {
2329 phandle child;
2330 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2331 unsigned long soff;
2332 unsigned char *valp;
2333 static char pname[MAX_PROPERTY_NAME];
2334 int l, room, has_phandle = 0;
2335
2336 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2337
2338 /* get the node's full name */
2339 namep = (char *)*mem_start;
2340 room = *mem_end - *mem_start;
2341 if (room > 255)
2342 room = 255;
2343 l = call_prom("package-to-path", 3, 1, node, namep, room);
2344 if (l >= 0) {
2345 /* Didn't fit? Get more room. */
2346 if (l >= room) {
2347 if (l >= *mem_end - *mem_start)
2348 namep = make_room(mem_start, mem_end, l+1, 1);
2349 call_prom("package-to-path", 3, 1, node, namep, l);
2350 }
2351 namep[l] = '\0';
2352
2353 /* Fixup an Apple bug where they have bogus \0 chars in the
2354 * middle of the path in some properties, and extract
2355 * the unit name (everything after the last '/').
2356 */
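		/*
		 * e.g. a (hypothetical) "/pci@f2000000/mac-io@17/escc@13000"
		 * is reduced in place to just "escc@13000".
		 */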
2357 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2358 if (*p == '/')
2359 lp = namep;
2360 else if (*p != 0)
2361 *lp++ = *p;
2362 }
2363 *lp = 0;
2364 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2365 }
2366
2367 /* get it again for debugging */
2368 path = prom_scratch;
2369 memset(path, 0, PROM_SCRATCH_SIZE);
2370 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2371
2372 /* get and store all properties */
2373 prev_name = "";
2374 sstart = (char *)dt_string_start;
2375 for (;;) {
2376 if (call_prom("nextprop", 3, 1, node, prev_name,
2377 pname) != 1)
2378 break;
2379
2380 /* skip "name" */
2381 if (strcmp(pname, "name") == 0) {
2382 prev_name = "name";
2383 continue;
2384 }
2385
2386 /* find string offset */
2387 soff = dt_find_string(pname);
2388 if (soff == 0) {
2389 prom_printf("WARNING: Can't find string index for"
2390 " <%s>, node %s\n", pname, path);
2391 break;
2392 }
2393 prev_name = sstart + soff;
2394
2395 /* get length */
2396 l = call_prom("getproplen", 2, 1, node, pname);
2397
2398 /* sanity checks */
2399 if (l == PROM_ERROR)
2400 continue;
2401
2402 /* push property head */
2403 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2404 dt_push_token(l, mem_start, mem_end);
2405 dt_push_token(soff, mem_start, mem_end);
2406
2407 /* push property content */
2408 valp = make_room(mem_start, mem_end, l, 4);
2409 call_prom("getprop", 4, 1, node, pname, valp, l);
2410 *mem_start = _ALIGN(*mem_start, 4);
2411
2412 if (!strcmp(pname, "phandle"))
2413 has_phandle = 1;
2414 }
2415
2416 /* Add a "linux,phandle" property if no "phandle" property already
2417 * existed (can happen with OPAL)
2418 */
2419 if (!has_phandle) {
2420 soff = dt_find_string("linux,phandle");
2421 if (soff == 0)
2422 prom_printf("WARNING: Can't find string index for"
2423 " <linux-phandle> node %s\n", path);
2424 else {
2425 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2426 dt_push_token(4, mem_start, mem_end);
2427 dt_push_token(soff, mem_start, mem_end);
2428 valp = make_room(mem_start, mem_end, 4, 4);
2429 *(__be32 *)valp = cpu_to_be32(node);
2430 }
2431 }
2432
2433 /* do all our children */
2434 child = call_prom("child", 1, 1, node);
2435 while (child != 0) {
2436 scan_dt_build_struct(child, mem_start, mem_end);
2437 child = call_prom("peer", 1, 1, child);
2438 }
2439
2440 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2441 }
2442
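/*
 * Rough layout of the blob built below (the offsets of each block are
 * recorded in the boot_param_header filled in at the end):
 *
 *     boot_param_header
 *     memory reserve map   (copy of mem_reserve_map)
 *     strings block        (property names, from scan_dt_build_strings())
 *     structure block      (nodes and properties, from scan_dt_build_struct(),
 *                           terminated by OF_DT_END)
 */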
2443 static void __init flatten_device_tree(void)
2444 {
2445 phandle root;
2446 unsigned long mem_start, mem_end, room;
2447 struct boot_param_header *hdr;
2448 char *namep;
2449 u64 *rsvmap;
2450
2451 /*
2452 * Check how much room we have between alloc top & bottom (+/- a
2453 * few pages), crop to 1MB, as this is our "chunk" size
2454 */
2455 room = alloc_top - alloc_bottom - 0x4000;
2456 if (room > DEVTREE_CHUNK_SIZE)
2457 room = DEVTREE_CHUNK_SIZE;
2458 prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2459
2460 /* Now try to claim that */
2461 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2462 if (mem_start == 0)
2463 prom_panic("Can't allocate initial device-tree chunk\n");
2464 mem_end = mem_start + room;
2465
2466 /* Get root of tree */
2467 root = call_prom("peer", 1, 1, (phandle)0);
2468 if (root == (phandle)0)
2469 prom_panic("couldn't get device tree root\n");
2470
2471 /* Build header and make room for mem rsv map */
2472 mem_start = _ALIGN(mem_start, 4);
2473 hdr = make_room(&mem_start, &mem_end,
2474 sizeof(struct boot_param_header), 4);
2475 dt_header_start = (unsigned long)hdr;
2476 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2477
2478 /* Start of strings */
2479 mem_start = PAGE_ALIGN(mem_start);
2480 dt_string_start = mem_start;
2481 mem_start += 4; /* hole */
2482
2483 /* Add "linux,phandle" in there, we'll need it */
2484 namep = make_room(&mem_start, &mem_end, 16, 1);
2485 strcpy(namep, "linux,phandle");
2486 mem_start = (unsigned long)namep + strlen(namep) + 1;
2487
2488 /* Build string array */
2489 prom_printf("Building dt strings...\n");
2490 scan_dt_build_strings(root, &mem_start, &mem_end);
2491 dt_string_end = mem_start;
2492
2493 /* Build structure */
2494 mem_start = PAGE_ALIGN(mem_start);
2495 dt_struct_start = mem_start;
2496 prom_printf("Building dt structure...\n");
2497 scan_dt_build_struct(root, &mem_start, &mem_end);
2498 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2499 dt_struct_end = PAGE_ALIGN(mem_start);
2500
2501 /* Finish header */
2502 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2503 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2504 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2505 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2506 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2507 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2508 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2509 hdr->version = cpu_to_be32(OF_DT_VERSION);
2510 /* Version 16 is not backward compatible */
2511 hdr->last_comp_version = cpu_to_be32(0x10);
2512
2513 /* Copy the reserve map in */
2514 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2515
2516 #ifdef DEBUG_PROM
2517 {
2518 int i;
2519 prom_printf("reserved memory map:\n");
2520 for (i = 0; i < mem_reserve_cnt; i++)
2521 prom_printf(" %x - %x\n",
2522 be64_to_cpu(mem_reserve_map[i].base),
2523 be64_to_cpu(mem_reserve_map[i].size));
2524 }
2525 #endif
2526 /* Bump mem_reserve_cnt to cause further reservations to fail
2527 * since it's too late.
2528 */
2529 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2530
2531 prom_printf("Device tree strings 0x%x -> 0x%x\n",
2532 dt_string_start, dt_string_end);
2533 prom_printf("Device tree struct 0x%x -> 0x%x\n",
2534 dt_struct_start, dt_struct_end);
2535 }
2536
2537 #ifdef CONFIG_PPC_MAPLE
2538 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2539 * The values are bad, and it doesn't even have the right number of cells. */
2540 static void __init fixup_device_tree_maple(void)
2541 {
2542 phandle isa;
2543 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2544 u32 isa_ranges[6];
2545 char *name;
2546
2547 name = "/ht@0/isa@4";
2548 isa = call_prom("finddevice", 1, 1, ADDR(name));
2549 if (!PHANDLE_VALID(isa)) {
2550 name = "/ht@0/isa@6";
2551 isa = call_prom("finddevice", 1, 1, ADDR(name));
2552 rloc = 0x01003000; /* IO space; PCI device = 6 */
2553 }
2554 if (!PHANDLE_VALID(isa))
2555 return;
2556
2557 if (prom_getproplen(isa, "ranges") != 12)
2558 return;
2559 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2560 == PROM_ERROR)
2561 return;
2562
2563 if (isa_ranges[0] != 0x1 ||
2564 isa_ranges[1] != 0xf4000000 ||
2565 isa_ranges[2] != 0x00010000)
2566 return;
2567
2568 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2569
2570 isa_ranges[0] = 0x1;
2571 isa_ranges[1] = 0x0;
2572 isa_ranges[2] = rloc;
2573 isa_ranges[3] = 0x0;
2574 isa_ranges[4] = 0x0;
2575 isa_ranges[5] = 0x00010000;
2576 prom_setprop(isa, name, "ranges",
2577 isa_ranges, sizeof(isa_ranges));
2578 }
2579
2580 #define CPC925_MC_START 0xf8000000
2581 #define CPC925_MC_LENGTH 0x1000000
2582 /* The values for the memory-controller don't have the right number of cells */
2583 static void __init fixup_device_tree_maple_memory_controller(void)
2584 {
2585 phandle mc;
2586 u32 mc_reg[4];
2587 char *name = "/hostbridge@f8000000";
2588 u32 ac, sc;
2589
2590 mc = call_prom("finddevice", 1, 1, ADDR(name));
2591 if (!PHANDLE_VALID(mc))
2592 return;
2593
2594 if (prom_getproplen(mc, "reg") != 8)
2595 return;
2596
2597 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2598 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2599 if ((ac != 2) || (sc != 2))
2600 return;
2601
2602 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2603 return;
2604
2605 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2606 return;
2607
2608 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2609
2610 mc_reg[0] = 0x0;
2611 mc_reg[1] = CPC925_MC_START;
2612 mc_reg[2] = 0x0;
2613 mc_reg[3] = CPC925_MC_LENGTH;
2614 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2615 }
2616 #else
2617 #define fixup_device_tree_maple()
2618 #define fixup_device_tree_maple_memory_controller()
2619 #endif
2620
2621 #ifdef CONFIG_PPC_CHRP
2622 /*
2623 * Pegasos and BriQ lack the "ranges" property in the isa node
2624 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2625 * Pegasos has the IDE configured in legacy mode, but advertised as native
2626 */
2627 static void __init fixup_device_tree_chrp(void)
2628 {
2629 phandle ph;
2630 u32 prop[6];
2631 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2632 char *name;
2633 int rc;
2634
2635 name = "/pci@80000000/isa@c";
2636 ph = call_prom("finddevice", 1, 1, ADDR(name));
2637 if (!PHANDLE_VALID(ph)) {
2638 name = "/pci@ff500000/isa@6";
2639 ph = call_prom("finddevice", 1, 1, ADDR(name));
2640 rloc = 0x01003000; /* IO space; PCI device = 6 */
2641 }
2642 if (PHANDLE_VALID(ph)) {
2643 rc = prom_getproplen(ph, "ranges");
2644 if (rc == 0 || rc == PROM_ERROR) {
2645 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2646
2647 prop[0] = 0x1;
2648 prop[1] = 0x0;
2649 prop[2] = rloc;
2650 prop[3] = 0x0;
2651 prop[4] = 0x0;
2652 prop[5] = 0x00010000;
2653 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2654 }
2655 }
2656
2657 name = "/pci@80000000/ide@C,1";
2658 ph = call_prom("finddevice", 1, 1, ADDR(name));
2659 if (PHANDLE_VALID(ph)) {
2660 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2661 prop[0] = 14;
2662 prop[1] = 0x0;
2663 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2664 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2665 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2666 if (rc == sizeof(u32)) {
2667 prop[0] &= ~0x5;
2668 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2669 }
2670 }
2671 }
2672 #else
2673 #define fixup_device_tree_chrp()
2674 #endif
2675
2676 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2677 static void __init fixup_device_tree_pmac(void)
2678 {
2679 phandle u3, i2c, mpic;
2680 u32 u3_rev;
2681 u32 interrupts[2];
2682 u32 parent;
2683
2684 /* Some G5s have a missing interrupt definition, fix it up here */
2685 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2686 if (!PHANDLE_VALID(u3))
2687 return;
2688 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2689 if (!PHANDLE_VALID(i2c))
2690 return;
2691 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2692 if (!PHANDLE_VALID(mpic))
2693 return;
2694
2695 /* check if proper rev of u3 */
2696 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2697 == PROM_ERROR)
2698 return;
2699 if (u3_rev < 0x35 || u3_rev > 0x39)
2700 return;
2701 /* does it need fixup ? */
2702 if (prom_getproplen(i2c, "interrupts") > 0)
2703 return;
2704
2705 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2706
2707 /* interrupt on this revision of u3 is number 0 and level */
2708 interrupts[0] = 0;
2709 interrupts[1] = 1;
2710 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2711 &interrupts, sizeof(interrupts));
2712 parent = (u32)mpic;
2713 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2714 &parent, sizeof(parent));
2715 }
2716 #else
2717 #define fixup_device_tree_pmac()
2718 #endif
2719
2720 #ifdef CONFIG_PPC_EFIKA
2721 /*
2722 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2723 * to talk to the phy. If the phy-handle property is missing, then this
2724 * function is called to add the appropriate nodes and link it to the
2725 * ethernet node.
2726 */
2727 static void __init fixup_device_tree_efika_add_phy(void)
2728 {
2729 u32 node;
2730 char prop[64];
2731 int rv;
2732
2733 /* Check if /builtin/ethernet exists - bail if it doesn't */
2734 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2735 if (!PHANDLE_VALID(node))
2736 return;
2737
2738 /* Check if the phy-handle property exists - bail if it does */
2739 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2740 if (!rv)
2741 return;
2742
2743 /*
2744 * At this point the ethernet device doesn't have a phy described.
2745 * Now we need to add the missing phy node and linkage
2746 */
2747
2748 /* Check for an MDIO bus node - if missing then create one */
2749 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2750 if (!PHANDLE_VALID(node)) {
2751 prom_printf("Adding Ethernet MDIO node\n");
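		/*
		 * Summary of the Forth script below (it adds no behaviour
		 * beyond this): create /builtin/mdio with #address-cells = 1,
		 * #size-cells = 0, compatible = "fsl,mpc5200b-mdio",
		 * reg = <0xf0003000 0x400> and interrupts = <2 5 3>.
		 */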
2752 call_prom("interpret", 1, 1,
2753 " s\" /builtin\" find-device"
2754 " new-device"
2755 " 1 encode-int s\" #address-cells\" property"
2756 " 0 encode-int s\" #size-cells\" property"
2757 " s\" mdio\" device-name"
2758 " s\" fsl,mpc5200b-mdio\" encode-string"
2759 " s\" compatible\" property"
2760 " 0xf0003000 0x400 reg"
2761 " 0x2 encode-int"
2762 " 0x5 encode-int encode+"
2763 " 0x3 encode-int encode+"
2764 " s\" interrupts\" property"
2765 " finish-device");
2766 }
2767
2768 /* Check for a PHY device node - if missing then create one and
2769 * give its phandle to the ethernet node */
2770 node = call_prom("finddevice", 1, 1,
2771 ADDR("/builtin/mdio/ethernet-phy"));
2772 if (!PHANDLE_VALID(node)) {
2773 prom_printf("Adding Ethernet PHY node\n");
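		/*
		 * Summary of the Forth script below: create
		 * /builtin/mdio/ethernet-phy with reg = <0x10>, then point
		 * the ethernet node's phy-handle property at the new node's
		 * phandle.
		 */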
2774 call_prom("interpret", 1, 1,
2775 " s\" /builtin/mdio\" find-device"
2776 " new-device"
2777 " s\" ethernet-phy\" device-name"
2778 " 0x10 encode-int s\" reg\" property"
2779 " my-self"
2780 " ihandle>phandle"
2781 " finish-device"
2782 " s\" /builtin/ethernet\" find-device"
2783 " encode-int"
2784 " s\" phy-handle\" property"
2785 " device-end");
2786 }
2787 }
2788
2789 static void __init fixup_device_tree_efika(void)
2790 {
2791 int sound_irq[3] = { 2, 2, 0 };
2792 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2793 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2794 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2795 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2796 u32 node;
2797 char prop[64];
2798 int rv, len;
2799
2800 /* Check if we're really running on an EFIKA */
2801 node = call_prom("finddevice", 1, 1, ADDR("/"));
2802 if (!PHANDLE_VALID(node))
2803 return;
2804
2805 rv = prom_getprop(node, "model", prop, sizeof(prop));
2806 if (rv == PROM_ERROR)
2807 return;
2808 if (strcmp(prop, "EFIKA5K2"))
2809 return;
2810
2811 prom_printf("Applying EFIKA device tree fixups\n");
2812
2813 /* Claiming to be 'chrp' is death */
2814 node = call_prom("finddevice", 1, 1, ADDR("/"));
2815 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2816 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2817 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2818
2819 /* CODEGEN,description is exposed in /proc/cpuinfo so
2820 fix that too */
2821 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2822 if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2823 prom_setprop(node, "/", "CODEGEN,description",
2824 "Efika 5200B PowerPC System",
2825 sizeof("Efika 5200B PowerPC System"));
2826
2827 /* Fixup bestcomm interrupts property */
2828 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2829 if (PHANDLE_VALID(node)) {
2830 len = prom_getproplen(node, "interrupts");
2831 if (len == 12) {
2832 prom_printf("Fixing bestcomm interrupts property\n");
2833 prom_setprop(node, "/builtin/bestcomm", "interrupts",
2834 bcomm_irq, sizeof(bcomm_irq));
2835 }
2836 }
2837
2838 /* Fixup sound interrupts property */
2839 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2840 if (PHANDLE_VALID(node)) {
2841 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2842 if (rv == PROM_ERROR) {
2843 prom_printf("Adding sound interrupts property\n");
2844 prom_setprop(node, "/builtin/sound", "interrupts",
2845 sound_irq, sizeof(sound_irq));
2846 }
2847 }
2848
2849 /* Make sure ethernet phy-handle property exists */
2850 fixup_device_tree_efika_add_phy();
2851 }
2852 #else
2853 #define fixup_device_tree_efika()
2854 #endif
2855
2856 #ifdef CONFIG_PPC_PASEMI_NEMO
2857 /*
2858 * The CFE firmware supplied on Nemo is broken in several ways; the
2859 * biggest problem is that it reassigns ISA interrupts to unused MPIC ints.
2860 * Add an interrupt-controller property for the io-bridge to use
2861 * and correct the interrupts so we can attach them to an irq_domain.
2862 */
2863 static void __init fixup_device_tree_pasemi(void)
2864 {
2865 u32 interrupts[2], parent, rval, val = 0;
2866 char *name, *pci_name;
2867 phandle iob, node;
2868
2869 /* Find the root pci node */
2870 name = "/pxp@0,e0000000";
2871 iob = call_prom("finddevice", 1, 1, ADDR(name));
2872 if (!PHANDLE_VALID(iob))
2873 return;
2874
2875 /* check if interrupt-controller node set yet */
2876 if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
2877 return;
2878
2879 prom_printf("adding interrupt-controller property for SB600...\n");
2880
2881 prom_setprop(iob, name, "interrupt-controller", &val, 0);
2882
2883 pci_name = "/pxp@0,e0000000/pci@11";
2884 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
2885 parent = ADDR(iob);
2886
2887 for ( ; prom_next_node(&node); ) {
2888 /* scan each node for one with an interrupt */
2889 if (!PHANDLE_VALID(node))
2890 continue;
2891
2892 rval = prom_getproplen(node, "interrupts");
2893 if (rval == 0 || rval == PROM_ERROR)
2894 continue;
2895
2896 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
2897 if ((interrupts[0] < 212) || (interrupts[0] > 222))
2898 continue;
2899
2900 /* found a node, update both interrupts and interrupt-parent */
2901 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
2902 interrupts[0] -= 203;
2903 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
2904 interrupts[0] -= 213;
2905 if (interrupts[0] == 221)
2906 interrupts[0] = 14;
2907 if (interrupts[0] == 222)
2908 interrupts[0] = 8;
2909
2910 prom_setprop(node, pci_name, "interrupts", interrupts,
2911 sizeof(interrupts));
2912 prom_setprop(node, pci_name, "interrupt-parent", &parent,
2913 sizeof(parent));
2914 }
2915
2916 /*
2917 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
2918 * so that generic isa-bridge code can add the SB600 and its on-board
2919 * peripherals.
2920 */
2921 name = "/pxp@0,e0000000/io-bridge@0";
2922 iob = call_prom("finddevice", 1, 1, ADDR(name));
2923 if (!PHANDLE_VALID(iob))
2924 return;
2925
2926 /* device_type is already set, just change it. */
2927
2928 prom_printf("Changing device_type of SB600 node...\n");
2929
2930 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
2931 }
2932 #else /* !CONFIG_PPC_PASEMI_NEMO */
2933 static inline void fixup_device_tree_pasemi(void) { }
2934 #endif
2935
2936 static void __init fixup_device_tree(void)
2937 {
2938 fixup_device_tree_maple();
2939 fixup_device_tree_maple_memory_controller();
2940 fixup_device_tree_chrp();
2941 fixup_device_tree_pmac();
2942 fixup_device_tree_efika();
2943 fixup_device_tree_pasemi();
2944 }
2945
2946 static void __init prom_find_boot_cpu(void)
2947 {
2948 __be32 rval;
2949 ihandle prom_cpu;
2950 phandle cpu_pkg;
2951
2952 rval = 0;
2953 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2954 return;
2955 prom_cpu = be32_to_cpu(rval);
2956
2957 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2958
2959 if (!PHANDLE_VALID(cpu_pkg))
2960 return;
2961
2962 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2963 prom.cpu = be32_to_cpu(rval);
2964
2965 prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
2966 }
2967
2968 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2969 {
2970 #ifdef CONFIG_BLK_DEV_INITRD
2971 if (r3 && r4 && r4 != 0xdeadbeef) {
2972 __be64 val;
2973
2974 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2975 prom_initrd_end = prom_initrd_start + r4;
2976
2977 val = cpu_to_be64(prom_initrd_start);
2978 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2979 &val, sizeof(val));
2980 val = cpu_to_be64(prom_initrd_end);
2981 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
2982 &val, sizeof(val));
2983
2984 reserve_mem(prom_initrd_start,
2985 prom_initrd_end - prom_initrd_start);
2986
2987 prom_debug("initrd_start=0x%x\n", prom_initrd_start);
2988 prom_debug("initrd_end=0x%x\n", prom_initrd_end);
2989 }
2990 #endif /* CONFIG_BLK_DEV_INITRD */
2991 }
2992
2993 #ifdef CONFIG_PPC64
2994 #ifdef CONFIG_RELOCATABLE
2995 static void reloc_toc(void)
2996 {
2997 }
2998
2999 static void unreloc_toc(void)
3000 {
3001 }
3002 #else
3003 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3004 {
3005 unsigned long i;
3006 unsigned long *toc_entry;
3007
3008 /* Get the start of the TOC by using r2 directly. */
3009 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
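	/*
	 * The 64-bit ABI biases r2 (the TOC pointer) 0x8000 bytes into the
	 * TOC so that the whole 64KB can be reached with signed 16-bit
	 * offsets; subtracting 0x8000 gives the actual start of the TOC.
	 */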
3010
3011 for (i = 0; i < nr_entries; i++) {
3012 *toc_entry = *toc_entry + offset;
3013 toc_entry++;
3014 }
3015 }
3016
3017 static void reloc_toc(void)
3018 {
3019 unsigned long offset = reloc_offset();
3020 unsigned long nr_entries =
3021 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3022
3023 __reloc_toc(offset, nr_entries);
3024
3025 mb();
3026 }
3027
3028 static void unreloc_toc(void)
3029 {
3030 unsigned long offset = reloc_offset();
3031 unsigned long nr_entries =
3032 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3033
3034 mb();
3035
3036 __reloc_toc(-offset, nr_entries);
3037 }
3038 #endif
3039 #endif
3040
3041 /*
3042 * We enter here early on, when the Open Firmware prom is still
3043 * handling exceptions and managing the MMU hash table for us.
3044 */
3045
3046 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3047 unsigned long pp,
3048 unsigned long r6, unsigned long r7,
3049 unsigned long kbase)
3050 {
3051 unsigned long hdr;
3052
3053 #ifdef CONFIG_PPC32
3054 unsigned long offset = reloc_offset();
3055 reloc_got2(offset);
3056 #else
3057 reloc_toc();
3058 #endif
3059
3060 /*
3061 * First zero the BSS
3062 */
3063 memset(&__bss_start, 0, __bss_stop - __bss_start);
3064
3065 /*
3066 * Init interface to Open Firmware, get some node references,
3067 * like /chosen
3068 */
3069 prom_init_client_services(pp);
3070
3071 /*
3072 * See if this OF is old enough that we need to do explicit maps
3073 * and other workarounds
3074 */
3075 prom_find_mmu();
3076
3077 /*
3078 * Init prom stdout device
3079 */
3080 prom_init_stdout();
3081
3082 prom_printf("Preparing to boot %s", linux_banner);
3083
3084 /*
3085 * Get default machine type. At this point, we do not differentiate
3086 * between pSeries SMP and pSeries LPAR
3087 */
3088 of_platform = prom_find_machine_type();
3089 prom_printf("Detected machine type: %x\n", of_platform);
3090
3091 #ifndef CONFIG_NONSTATIC_KERNEL
3092 /* Bail if this is a kdump kernel. */
3093 if (PHYSICAL_START > 0)
3094 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3095 #endif
3096
3097 /*
3098 * Check for an initrd
3099 */
3100 prom_check_initrd(r3, r4);
3101
3102 /*
3103 * Do early parsing of command line
3104 */
3105 early_cmdline_parse();
3106
3107 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
3108 /*
3109 * On pSeries, inform the firmware about our capabilities
3110 */
3111 if (of_platform == PLATFORM_PSERIES ||
3112 of_platform == PLATFORM_PSERIES_LPAR)
3113 prom_send_capabilities();
3114 #endif
3115
3116 /*
3117 * Copy the CPU hold code
3118 */
3119 if (of_platform != PLATFORM_POWERMAC)
3120 copy_and_flush(0, kbase, 0x100, 0);
3121
3122 /*
3123 * Initialize memory management within prom_init
3124 */
3125 prom_init_mem();
3126
3127 /*
3128 * Determine which cpu is actually running right _now_
3129 */
3130 prom_find_boot_cpu();
3131
3132 /*
3133 * Initialize display devices
3134 */
3135 prom_check_displays();
3136
3137 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3138 /*
3139 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3140 * that uses the allocator, as we need to make sure we get the top of memory
3141 * available for us here...
3142 */
3143 if (of_platform == PLATFORM_PSERIES)
3144 prom_initialize_tce_table();
3145 #endif
3146
3147 /*
3148 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3149 * have a usable RTAS implementation.
3150 */
3151 if (of_platform != PLATFORM_POWERMAC &&
3152 of_platform != PLATFORM_OPAL)
3153 prom_instantiate_rtas();
3154
3155 #ifdef CONFIG_PPC_POWERNV
3156 if (of_platform == PLATFORM_OPAL)
3157 prom_instantiate_opal();
3158 #endif /* CONFIG_PPC_POWERNV */
3159
3160 #ifdef CONFIG_PPC64
3161 /* instantiate sml */
3162 prom_instantiate_sml();
3163 #endif
3164
3165 /*
3166 * On non-powermacs, put all CPUs in spin-loops.
3167 *
3168 * PowerMacs use a different mechanism to spin CPUs.
3169 *
3170 * (This must be done after instantiating RTAS.)
3171 */
3172 if (of_platform != PLATFORM_POWERMAC &&
3173 of_platform != PLATFORM_OPAL)
3174 prom_hold_cpus();
3175
3176 /*
3177 * Fill in some info for use by the kernel later on
3178 */
3179 if (prom_memory_limit) {
3180 __be64 val = cpu_to_be64(prom_memory_limit);
3181 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3182 &val, sizeof(val));
3183 }
3184 #ifdef CONFIG_PPC64
3185 if (prom_iommu_off)
3186 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3187 NULL, 0);
3188
3189 if (prom_iommu_force_on)
3190 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3191 NULL, 0);
3192
3193 if (prom_tce_alloc_start) {
3194 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3195 &prom_tce_alloc_start,
3196 sizeof(prom_tce_alloc_start));
3197 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3198 &prom_tce_alloc_end,
3199 sizeof(prom_tce_alloc_end));
3200 }
3201 #endif
3202
3203 /*
3204 * Fixup any known bugs in the device-tree
3205 */
3206 fixup_device_tree();
3207
3208 /*
3209 * Now finally create the flattened device-tree
3210 */
3211 prom_printf("copying OF device tree...\n");
3212 flatten_device_tree();
3213
3214 /*
3215 * Close stdin in case it is USB and still active on IBM machines...
3216 * Unfortunately quiesce crashes on some powermacs if we have
3217 * closed stdin already (in particular the powerbook 101). It
3218 * appears that the OPAL version of OFW doesn't like it either.
3219 */
3220 if (of_platform != PLATFORM_POWERMAC &&
3221 of_platform != PLATFORM_OPAL)
3222 prom_close_stdin();
3223
3224 /*
3225 * Call the OF "quiesce" method to shut down pending DMAs from
3226 * devices etc...
3227 */
3228 prom_printf("Quiescing Open Firmware ...\n");
3229 call_prom("quiesce", 0, 0);
3230
3231 /*
3232 * And finally, call the kernel passing it the flattened device
3233 * tree and NULL as r5, thus triggering the new entry point which
3234 * is common to us and kexec
3235 */
3236 hdr = dt_header_start;
3237
3238 /* Don't print anything after quiesce under OPAL; it crashes OFW */
3239 if (of_platform != PLATFORM_OPAL) {
3240 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3241 prom_debug("->dt_header_start=0x%x\n", hdr);
3242 }
3243
3244 #ifdef CONFIG_PPC32
3245 reloc_got2(-offset);
3246 #else
3247 unreloc_toc();
3248 #endif
3249
3250 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3251 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3252 __start(hdr, kbase, 0, 0, 0,
3253 prom_opal_base, prom_opal_entry);
3254 #else
3255 __start(hdr, kbase, 0, 0, 0, 0, 0);
3256 #endif
3257
3258 return 0;
3259 }