/* Source: cpu/mpc86xx/start.S — U-Boot (mirror: git.ipfire.org, people/ms/u-boot.git,
 * branch merge '070524_netstar' of git://linux-arm.org/u-boot-armdev) */
1 /*
2 * Copyright 2004, 2007 Freescale Semiconductor.
3 * Srikanth Srinivasan <srikanth.srinivaan@freescale.com>
4 *
5 * See file CREDITS for list of people who contributed to this
6 * project.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24 /* U-Boot - Startup Code for 86xx PowerPC based Embedded Boards
25 *
26 *
27 * The processor starts at 0xfff00100 and the code is executed
28 * from flash. The code is linked to run at a different address
29 * in memory, which is fine as long as we don't jump around before
30 * relocating. board_init lies at a quite high address and when the
31 * cpu has jumped there, everything is ok.
32 */
33 #include <config.h>
34 #include <mpc86xx.h>
35 #include <version.h>
36
37 #include <ppc_asm.tmpl>
38 #include <ppc_defs.h>
39
40 #include <asm/cache.h>
41 #include <asm/mmu.h>
42
43 #ifndef CONFIG_IDENT_STRING
44 #define CONFIG_IDENT_STRING ""
45 #endif
46
47 /*
48 * Need MSR_DR | MSR_IR enabled to access I/O (printf) in exceptions
49 */
50
51 /*
52 * Set up GOT: Global Offset Table
53 *
54 * Use r14 to access the GOT
55 */
56 START_GOT
57 GOT_ENTRY(_GOT2_TABLE_)
58 GOT_ENTRY(_FIXUP_TABLE_)
59
/* entry points and vector-table bounds, read via GOT() in trap_init */
60 GOT_ENTRY(_start)
61 GOT_ENTRY(_start_of_vectors)
62 GOT_ENTRY(_end_of_vectors)
63 GOT_ENTRY(transfer_to_handler)
64
/* image-layout symbols, read by relocate_code and the BSS-clear loop */
65 GOT_ENTRY(__init_end)
66 GOT_ENTRY(_end)
67 GOT_ENTRY(__bss_start)
68 END_GOT
69
70 /*
71 * r3 - 1st arg to board_init(): IMMP pointer
72 * r4 - 2nd arg to board_init(): boot flag
73 */
/*
 * Image header (magic number + version string), then the hardware
 * reset entry point placed at EXC_OFF_SYS_RESET and the warm-boot
 * entry 0x10 bytes after it.
 */
74 .text
75 .long 0x27051956 /* U-Boot Magic Number */
76 .globl version_string
77 version_string:
78 .ascii U_BOOT_VERSION
79 .ascii " (", __DATE__, " - ", __TIME__, ")"
80 .ascii CONFIG_IDENT_STRING, "\0"
81
82 . = EXC_OFF_SYS_RESET
83 .globl _start
84 _start:
/* r21 carries the boot flag until it is moved into r3 for board_init_f */
85 li r21, BOOTFLAG_COLD /* Normal Power-On: Boot from FLASH */
86 b boot_cold
87 sync
88
89 . = EXC_OFF_SYS_RESET + 0x10
90
91 .globl _start_warm
92 _start_warm:
93 li r21, BOOTFLAG_WARM /* Software reboot */
94 b boot_warm
95 sync
96
97 /* the boot code is located below the exception table */
98
/*
 * Exception vector table.  Each vector's offset is fixed by the
 * PowerPC architecture (0x200 machine check, 0x300 DSI, ...).
 * Most vectors use the STD_EXCEPTION macro; Alignment and
 * ProgramCheck are open-coded because they need extra state
 * (DAR/DSISR) or a distinct handler table.
 */
99 .globl _start_of_vectors
100 _start_of_vectors:
101
102 /* Machine check */
103 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
104
105 /* Data Storage exception. */
106 STD_EXCEPTION(0x300, DataStorage, UnknownException)
107
108 /* Instruction Storage exception. */
109 STD_EXCEPTION(0x400, InstStorage, UnknownException)
110
111 /* External Interrupt exception. */
112 STD_EXCEPTION(0x500, ExtInterrupt, external_interrupt)
113
114 /* Alignment exception. */
115 . = 0x600
116 Alignment:
117 EXCEPTION_PROLOG(SRR0, SRR1)
/* save fault address and cause for the C handler */
118 mfspr r4,DAR
119 stw r4,_DAR(r21)
120 mfspr r5,DSISR
121 stw r5,_DSISR(r21)
122 addi r3,r1,STACK_FRAME_OVERHEAD
123 li r20,MSR_KERNEL
124 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
125 lwz r6,GOT(transfer_to_handler)
126 mtlr r6
/* blrl leaves LR pointing at the .long pair below, which
 * transfer_to_handler reads as (handler, return) addresses */
127 blrl
128 .L_Alignment:
129 .long AlignmentException - _start + EXC_OFF_SYS_RESET
130 .long int_return - _start + EXC_OFF_SYS_RESET
131
132 /* Program check exception */
133 . = 0x700
134 ProgramCheck:
135 EXCEPTION_PROLOG(SRR0, SRR1)
136 addi r3,r1,STACK_FRAME_OVERHEAD
137 li r20,MSR_KERNEL
138 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
139 lwz r6,GOT(transfer_to_handler)
140 mtlr r6
141 blrl
142 .L_ProgramCheck:
143 .long ProgramCheckException - _start + EXC_OFF_SYS_RESET
144 .long int_return - _start + EXC_OFF_SYS_RESET
145
146 STD_EXCEPTION(0x800, FPUnavailable, UnknownException)
147
148 /* I guess we could implement decrementer, and may have
149 * to someday for timekeeping.
150 */
151 STD_EXCEPTION(0x900, Decrementer, timer_interrupt)
152 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
153 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
154 STD_EXCEPTION(0xc00, SystemCall, UnknownException)
155 STD_EXCEPTION(0xd00, SingleStep, UnknownException)
156 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
157 STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
158 STD_EXCEPTION(0x1000, SoftEmu, SoftEmuException)
159 STD_EXCEPTION(0x1100, InstructionTLBMiss, UnknownException)
160 STD_EXCEPTION(0x1200, DataTLBMiss, UnknownException)
161 STD_EXCEPTION(0x1300, InstructionTLBError, UnknownException)
162 STD_EXCEPTION(0x1400, DataTLBError, UnknownException)
163 STD_EXCEPTION(0x1500, Reserved5, UnknownException)
164 STD_EXCEPTION(0x1600, Reserved6, UnknownException)
165 STD_EXCEPTION(0x1700, Reserved7, UnknownException)
166 STD_EXCEPTION(0x1800, Reserved8, UnknownException)
167 STD_EXCEPTION(0x1900, Reserved9, UnknownException)
168 STD_EXCEPTION(0x1a00, ReservedA, UnknownException)
169 STD_EXCEPTION(0x1b00, ReservedB, UnknownException)
170 STD_EXCEPTION(0x1c00, DataBreakpoint, UnknownException)
171 STD_EXCEPTION(0x1d00, InstructionBreakpoint, UnknownException)
172 STD_EXCEPTION(0x1e00, PeripheralBreakpoint, UnknownException)
173 STD_EXCEPTION(0x1f00, DevPortBreakpoint, UnknownException)
174
175 .globl _end_of_vectors
176 _end_of_vectors:
177
178 . = 0x2000
179
/*
 * Main boot path, entered from the reset vector.  Order matters:
 * BAT invalidation, L2 init, jump to the absolute flash address,
 * extended addressing, early BATs, MMU on, caches on, stack in
 * locked cache, then C code (cpu_init_f / board_init_f).
 */
180 boot_cold:
181 boot_warm:
182
183 /* if this is a multi-core system we need to check which cpu
184 * this is, if it is not cpu 0 send the cpu to the linux reset
185 * vector */
186 #if (CONFIG_NUM_CPUS > 1)
/* extract the cpu-ID bit (0x0020) from MSSCR0; rlwinm moves it to
 * the LSB for PIR.  beq tests CR0 from andi. — zero means cpu0 */
187 mfspr r0, MSSCR0
188 andi. r0, r0, 0x0020
189 rlwinm r0,r0,27,31,31
190 mtspr PIR, r0
191 beq 1f
192
193 bl secondary_cpu_setup
194 #endif
195
196 1:
197 #ifdef CFG_RAMBOOT
198 /* disable everything */
199 li r0, 0
200 mtspr HID0, r0
201 sync
202 mtmsr 0
203 #endif
204
205 bl invalidate_bats
206 sync
207
208 #ifdef CFG_L2
209 /* init the L2 cache */
210 lis r3, L2_INIT@h
211 ori r3, r3, L2_INIT@l
212 mtspr l2cr, r3
213 /* invalidate the L2 cache */
214 bl l2cache_invalidate
215 sync
216 #endif
217
218 /*
219 * Calculate absolute address in FLASH and jump there
220 *------------------------------------------------------*/
221 lis r3, CFG_MONITOR_BASE@h
222 ori r3, r3, CFG_MONITOR_BASE@l
223 addi r3, r3, in_flash - _start + EXC_OFF_SYS_RESET
224 mtlr r3
225 blr
226
227 in_flash:
228 /* let the C-code set up the rest */
229 /* */
230 /* Be careful to keep code relocatable ! */
231 /*------------------------------------------------------*/
232 /* perform low-level init */
233
234 /* enable extended addressing */
235 bl enable_ext_addr
236
237 /* setup the bats */
238 bl early_bats
239
240 /*
241 * Cache must be enabled here for stack-in-cache trick.
242 * This means we need to enable the BATS.
243 * Cache should be turned on after BATs, since by default
244 * everything is write-through.
245 */
246
247 /* enable address translation */
248 bl enable_addr_trans
249 sync
250
251 /* enable and invalidate the data cache */
252 /* bl l1dcache_enable */
253 bl dcache_enable
254 sync
255
256 #if 1
257 bl icache_enable
258 #endif
259
260 #ifdef CFG_INIT_RAM_LOCK
261 bl lock_ram_in_cache
262 sync
263 #endif
264
265 /* set up the stack pointer in our newly created
266 * cache-ram (r1) */
267 lis r1, (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET)@h
268 ori r1, r1, (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET)@l
269
270 li r0, 0 /* Make room for stack frame header and */
271 stwu r0, -4(r1) /* clear final stack frame so that */
272 stwu r0, -4(r1) /* stack backtraces terminate cleanly */
273
274 GET_GOT /* initialize GOT access */
275
276 /* setup the rest of the bats */
277 bl setup_bats
278 bl clear_tlbs
279 sync
280
281 #if (CFG_CCSRBAR_DEFAULT != CFG_CCSRBAR)
282 /* setup ccsrbar */
283 bl setup_ccsrbar
284 #endif
285
286 /* run low-level CPU init code (from Flash) */
287 bl cpu_init_f
288 sync
289
290 #ifdef RUN_DIAG
291
292 /* Load PX_AUX register address in r4 */
293 lis r4, 0xf810
294 ori r4, r4, 0x6
295 /* Load contents of PX_AUX in r3 bits 24 to 31*/
296 lbz r3, 0(r4)
297
298 /* Mask and obtain the bit in r3 */
299 rlwinm. r3, r3, 0, 24, 24
300 /* If not zero, jump and continue with u-boot */
301 bne diag_done
302
303 /* Load back contents of PX_AUX in r3 bits 24 to 31 */
304 lbz r3, 0(r4)
305 /* Set the MSB of the register value */
306 ori r3, r3, 0x80
307 /* Write value in r3 back to PX_AUX */
308 stb r3, 0(r4)
309
310 /* Get the address to jump to in r3*/
311 lis r3, CFG_DIAG_ADDR@h
312 ori r3, r3, CFG_DIAG_ADDR@l
313
314 /* Load the LR with the branch address */
315 mtlr r3
316
317 /* Branch to diagnostic */
318 blr
319
320 diag_done:
321 #endif
322
323 /* bl l2cache_enable */
/* boot flag saved at _start/_start_warm becomes the argument */
324 mr r3, r21
325
326 /* r3: BOOTFLAG */
327 /* run 1st part of board init code (from Flash) */
328 bl board_init_f
329 sync
330
331 /* NOTREACHED */
332
/*
 * invalidate_bats - disable all 8 instruction and data BAT mappings
 * by clearing the upper BAT registers (clears their valid bits).
 * Clobbers: r0.
 */
333 .globl invalidate_bats
334 invalidate_bats:
335
336 li r0, 0
337 /* invalidate BATs */
338 mtspr IBAT0U, r0
339 mtspr IBAT1U, r0
340 mtspr IBAT2U, r0
341 mtspr IBAT3U, r0
342 mtspr IBAT4U, r0
343 mtspr IBAT5U, r0
344 mtspr IBAT6U, r0
345 mtspr IBAT7U, r0
346
347 isync
348 mtspr DBAT0U, r0
349 mtspr DBAT1U, r0
350 mtspr DBAT2U, r0
351 mtspr DBAT3U, r0
352 mtspr DBAT4U, r0
353 mtspr DBAT5U, r0
354 mtspr DBAT6U, r0
355 mtspr DBAT7U, r0
356
357 isync
358 sync
359 blr
360
361
362 /* setup_bats - set them up to some initial state */
363 /* Skip any BATS setup in early_bats */
/*
 * Programs IBAT/DBAT pairs 0-4 and 7 from the board's CFG_xBATnL/U
 * values.  BATs 5 and 6 are owned by early_bats and left alone.
 * Each BAT is written lower-half first, then upper (valid) half,
 * followed by isync, per the usual BAT update sequence.
 * Clobbers: r0, r3, r4.
 */
364 .globl setup_bats
365 setup_bats:
366
367 addis r0, r0, 0x0000
368
369 /* IBAT 0 */
370 addis r4, r0, CFG_IBAT0L@h
371 ori r4, r4, CFG_IBAT0L@l
372 addis r3, r0, CFG_IBAT0U@h
373 ori r3, r3, CFG_IBAT0U@l
374 mtspr IBAT0L, r4
375 mtspr IBAT0U, r3
376 isync
377
378 /* DBAT 0 */
379 addis r4, r0, CFG_DBAT0L@h
380 ori r4, r4, CFG_DBAT0L@l
381 addis r3, r0, CFG_DBAT0U@h
382 ori r3, r3, CFG_DBAT0U@l
383 mtspr DBAT0L, r4
384 mtspr DBAT0U, r3
385 isync
386
387 /* IBAT 1 */
388 addis r4, r0, CFG_IBAT1L@h
389 ori r4, r4, CFG_IBAT1L@l
390 addis r3, r0, CFG_IBAT1U@h
391 ori r3, r3, CFG_IBAT1U@l
392 mtspr IBAT1L, r4
393 mtspr IBAT1U, r3
394 isync
395
396 /* DBAT 1 */
397 addis r4, r0, CFG_DBAT1L@h
398 ori r4, r4, CFG_DBAT1L@l
399 addis r3, r0, CFG_DBAT1U@h
400 ori r3, r3, CFG_DBAT1U@l
401 mtspr DBAT1L, r4
402 mtspr DBAT1U, r3
403 isync
404
405 /* IBAT 2 */
406 addis r4, r0, CFG_IBAT2L@h
407 ori r4, r4, CFG_IBAT2L@l
408 addis r3, r0, CFG_IBAT2U@h
409 ori r3, r3, CFG_IBAT2U@l
410 mtspr IBAT2L, r4
411 mtspr IBAT2U, r3
412 isync
413
414 /* DBAT 2 */
415 addis r4, r0, CFG_DBAT2L@h
416 ori r4, r4, CFG_DBAT2L@l
417 addis r3, r0, CFG_DBAT2U@h
418 ori r3, r3, CFG_DBAT2U@l
419 mtspr DBAT2L, r4
420 mtspr DBAT2U, r3
421 isync
422
423 /* IBAT 3 */
424 addis r4, r0, CFG_IBAT3L@h
425 ori r4, r4, CFG_IBAT3L@l
426 addis r3, r0, CFG_IBAT3U@h
427 ori r3, r3, CFG_IBAT3U@l
428 mtspr IBAT3L, r4
429 mtspr IBAT3U, r3
430 isync
431
432 /* DBAT 3 */
433 addis r4, r0, CFG_DBAT3L@h
434 ori r4, r4, CFG_DBAT3L@l
435 addis r3, r0, CFG_DBAT3U@h
436 ori r3, r3, CFG_DBAT3U@l
437 mtspr DBAT3L, r4
438 mtspr DBAT3U, r3
439 isync
440
441 /* IBAT 4 */
442 addis r4, r0, CFG_IBAT4L@h
443 ori r4, r4, CFG_IBAT4L@l
444 addis r3, r0, CFG_IBAT4U@h
445 ori r3, r3, CFG_IBAT4U@l
446 mtspr IBAT4L, r4
447 mtspr IBAT4U, r3
448 isync
449
450 /* DBAT 4 */
451 addis r4, r0, CFG_DBAT4L@h
452 ori r4, r4, CFG_DBAT4L@l
453 addis r3, r0, CFG_DBAT4U@h
454 ori r3, r3, CFG_DBAT4U@l
455 mtspr DBAT4L, r4
456 mtspr DBAT4U, r3
457 isync
458
459 /* IBAT 7 */
460 addis r4, r0, CFG_IBAT7L@h
461 ori r4, r4, CFG_IBAT7L@l
462 addis r3, r0, CFG_IBAT7U@h
463 ori r3, r3, CFG_IBAT7U@l
464 mtspr IBAT7L, r4
465 mtspr IBAT7U, r3
466 isync
467
468 /* DBAT 7 */
469 addis r4, r0, CFG_DBAT7L@h
470 ori r4, r4, CFG_DBAT7L@l
471 addis r3, r0, CFG_DBAT7U@h
472 ori r3, r3, CFG_DBAT7U@l
473 mtspr DBAT7L, r4
474 mtspr DBAT7U, r3
475 isync
476
477 sync
478 blr
479
480 /*
481 * early_bats:
482 *
483 * Set up bats needed early on - this is usually the BAT for the
484 * stack-in-cache and the Flash
485 */
/* Uses BAT pairs 5 and 6 only; setup_bats deliberately skips these.
 * Clobbers: r3, r4. */
486 .globl early_bats
487 early_bats:
488 /* IBAT 5 */
489 lis r4, CFG_IBAT5L@h
490 ori r4, r4, CFG_IBAT5L@l
491 lis r3, CFG_IBAT5U@h
492 ori r3, r3, CFG_IBAT5U@l
493 mtspr IBAT5L, r4
494 mtspr IBAT5U, r3
495 isync
496
497 /* DBAT 5 */
498 lis r4, CFG_DBAT5L@h
499 ori r4, r4, CFG_DBAT5L@l
500 lis r3, CFG_DBAT5U@h
501 ori r3, r3, CFG_DBAT5U@l
502 mtspr DBAT5L, r4
503 mtspr DBAT5U, r3
504 isync
505
506 /* IBAT 6 */
507 lis r4, CFG_IBAT6L@h
508 ori r4, r4, CFG_IBAT6L@l
509 lis r3, CFG_IBAT6U@h
510 ori r3, r3, CFG_IBAT6U@l
511 mtspr IBAT6L, r4
512 mtspr IBAT6U, r3
513 isync
514
515 /* DBAT 6 */
516 lis r4, CFG_DBAT6L@h
517 ori r4, r4, CFG_DBAT6L@l
518 lis r3, CFG_DBAT6U@h
519 ori r3, r3, CFG_DBAT6U@l
520 mtspr DBAT6L, r4
521 mtspr DBAT6U, r3
522 isync
523 blr
524
/*
 * clear_tlbs - invalidate TLB entries by issuing tlbie for every
 * 4 KB effective address from 0 up to 0x40000 (r5 = 0x4 << 16).
 * Clobbers: r3, r5, CR0.
 */
525 .globl clear_tlbs
526 clear_tlbs:
527 addis r3, 0, 0x0000
528 addis r5, 0, 0x4
529 isync
530 tlblp:
531 tlbie r3
532 sync
533 addi r3, r3, 0x1000 /* step one 4 KB page */
534 cmp 0, 0, r3, r5
535 blt tlblp
536 blr
537
/*
 * enable_addr_trans - turn on instruction and data address
 * translation by setting MSR[IR] and MSR[DR].  Clobbers: r5.
 */
538 .globl enable_addr_trans
539 enable_addr_trans:
540 /* enable address translation */
541 mfmsr r5
542 ori r5, r5, (MSR_IR | MSR_DR)
543 mtmsr r5
544 isync
545 blr
546
/*
 * disable_addr_trans - clear MSR[IR]/MSR[DR] and return to the
 * caller via rfi (SRR0 = saved LR, SRR1 = new MSR), so the MSR
 * change and the return happen atomically.  Returns early (beqlr)
 * if translation is already off.  Clobbers: r0, r3, r4, CR0.
 */
547 .globl disable_addr_trans
548 disable_addr_trans:
549 /* disable address translation */
550 mflr r4
551 mfmsr r3
552 andi. r0, r3, (MSR_IR | MSR_DR)
553 beqlr
554 andc r3, r3, r0
555 mtspr SRR0, r4
556 mtspr SRR1, r3
557 rfi
558
559 /*
560 * This code finishes saving the registers to the exception frame
561 * and jumps to the appropriate handler for the exception.
562 * Register r21 is pointer into trap frame, r1 has new stack pointer.
563 */
/*
 * On entry (set up by EXCEPTION_PROLOG in the vector stubs):
 *   r20 = MSR to run the handler with, r22 = saved NIP,
 *   r23 = saved MSR, LR = address of the two-word (handler,
 *   int_return) table that follows the stub's blrl.
 */
564 .globl transfer_to_handler
565 transfer_to_handler:
566 stw r22,_NIP(r21)
567 lis r22,MSR_POW@h
568 andc r23,r23,r22 /* clear POW in the saved MSR */
569 stw r23,_MSR(r21)
570 SAVE_GPR(7, r21)
571 SAVE_4GPRS(8, r21)
572 SAVE_8GPRS(12, r21)
573 SAVE_8GPRS(24, r21)
574 mflr r23
575 andi. r24,r23,0x3f00 /* get vector offset */
576 stw r24,TRAP(r21)
577 li r22,0
578 stw r22,RESULT(r21)
579 mtspr SPRG2,r22 /* r1 is now kernel sp */
580 lwz r24,0(r23) /* virtual address of handler */
581 lwz r23,4(r23) /* where to go when done */
582 mtspr SRR0,r24
583 mtspr SRR1,r20
584 mtlr r23
585 SYNC
586 rfi /* jump to handler, enable MMU */
587
/*
 * int_return - common exception exit path: disable external
 * interrupts, restore the full register set from the exception
 * frame on r1, and rfi back to the interrupted context.
 */
588 int_return:
589 mfmsr r28 /* Disable interrupts */
590 li r4,0
591 ori r4,r4,MSR_EE
592 andc r28,r28,r4
593 SYNC /* Some chip revs need this... */
594 mtmsr r28
595 SYNC
596 lwz r2,_CTR(r1)
597 lwz r0,_LINK(r1)
598 mtctr r2
599 mtlr r0
600 lwz r2,_XER(r1)
601 lwz r0,_CCR(r1)
602 mtspr XER,r2
603 mtcrf 0xFF,r0
604 REST_10GPRS(3, r1)
605 REST_10GPRS(13, r1)
606 REST_8GPRS(23, r1)
607 REST_GPR(31, r1)
608 lwz r2,_NIP(r1) /* Restore environment */
609 lwz r0,_MSR(r1)
610 mtspr SRR0,r2
611 mtspr SRR1,r0
612 lwz r0,GPR0(r1)
613 lwz r2,GPR2(r1)
614 lwz r1,GPR1(r1) /* r1 restored last - frame no longer usable */
615 SYNC
616 rfi
617
/* dc_read - stub, no-op on this CPU (returns immediately) */
618 .globl dc_read
619 dc_read:
620 blr
621
/* get_pvr - return the Processor Version Register in r3 */
622 .globl get_pvr
623 get_pvr:
624 mfspr r3, PVR
625 blr
626
/* get_svr - return the System Version Register in r3 */
627 .globl get_svr
628 get_svr:
629 mfspr r3, SVR
630 blr
631
632
/* Simple memory-mapped I/O accessors.  All take the address in r3;
 * out* take the value in r4; in* return the value in r3.  The *r
 * variants byte-reverse via lhbrx/sthbrx/stwbrx (r0 as index reads
 * as zero in the indexed-form base position). */
633 /*
634 * Function: in8
635 * Description: Input 8 bits
636 */
637 .globl in8
638 in8:
639 lbz r3,0x0000(r3)
640 blr
641
642 /*
643 * Function: out8
644 * Description: Output 8 bits
645 */
646 .globl out8
647 out8:
648 stb r4,0x0000(r3)
649 blr
650
651 /*
652 * Function: out16
653 * Description: Output 16 bits
654 */
655 .globl out16
656 out16:
657 sth r4,0x0000(r3)
658 blr
659
660 /*
661 * Function: out16r
662 * Description: Byte reverse and output 16 bits
663 */
664 .globl out16r
665 out16r:
666 sthbrx r4,r0,r3
667 blr
668
669 /*
670 * Function: out32
671 * Description: Output 32 bits
672 */
673 .globl out32
674 out32:
675 stw r4,0x0000(r3)
676 blr
677
678 /*
679 * Function: out32r
680 * Description: Byte reverse and output 32 bits
681 */
682 .globl out32r
683 out32r:
684 stwbrx r4,r0,r3
685 blr
686
687 /*
688 * Function: in16
689 * Description: Input 16 bits
690 */
691 .globl in16
692 in16:
693 lhz r3,0x0000(r3)
694 blr
695
696 /*
697 * Function: in16r
698 * Description: Input 16 bits and byte reverse
699 */
700 .globl in16r
701 in16r:
702 lhbrx r3,r0,r3
703 blr
704
/*
 * Function: in32
 * Description: Input 32 bits
 * In:  r3 = address; Out: r3 = 32-bit value read.
 * (Was written with bare register numbers `lwz 3,0x0000(3)`;
 * changed to the rN form used by every other accessor in this
 * file — identical instruction encoding, consistent style.)
 */
	.globl in32
in32:
	lwz	r3,0x0000(r3)
	blr
713
714 /*
715 * Function: in32r
716 * Description: Input 32 bits and byte reverse
717 */
/* r3 = address in; r3 = byte-reversed 32-bit value out */
718 .globl in32r
719 in32r:
720 lwbrx r3,r0,r3
721 blr
722
723 /*
724 * void relocate_code (addr_sp, gd, addr_moni)
725 *
726 * This "function" does not return, instead it continues in RAM
727 * after relocating the monitor code.
728 *
729 * r3 = dest
730 * r4 = src
731 * r5 = length in bytes
732 * r6 = cachelinesize
733 */
/* Note: the r3..r6 roles above describe the copy loop; the C-level
 * arguments on entry are r3 = addr_sp, r4 = gd, r5 = addr_moni,
 * which the first four instructions shuffle into r1/r9/r2/r10. */
734 .globl relocate_code
735 relocate_code:
736
737 mr r1, r3 /* Set new stack pointer */
738 mr r9, r4 /* Save copy of Global Data pointer */
739 mr r2, r9 /* Save for DECLARE_GLOBAL_DATA_PTR */
740 mr r10, r5 /* Save copy of Destination Address */
741
742 mr r3, r5 /* Destination Address */
743 lis r4, CFG_MONITOR_BASE@h /* Source Address */
744 ori r4, r4, CFG_MONITOR_BASE@l
745 lwz r5, GOT(__init_end)
746 sub r5, r5, r4 /* length = __init_end - base */
747 li r6, CFG_CACHELINE_SIZE /* Cache Line Size */
748
749 /*
750 * Fix GOT pointer:
751 *
752 * New GOT-PTR = (old GOT-PTR - CFG_MONITOR_BASE) + Destination Address
753 *
754 * Offset:
755 */
756 sub r15, r10, r4
757
758 /* First our own GOT */
759 add r14, r14, r15
760 /* then the one used by the C code */
761 add r30, r30, r15
762
763 /*
764 * Now relocate code
765 */
766 #ifdef CONFIG_ECC
767 bl board_relocate_rom
768 sync
769 mr r3, r10 /* Destination Address */
770 lis r4, CFG_MONITOR_BASE@h /* Source Address */
771 ori r4, r4, CFG_MONITOR_BASE@l
772 lwz r5, GOT(__init_end)
773 sub r5, r5, r4
774 li r6, CFG_CACHELINE_SIZE /* Cache Line Size */
775 #else
/* word-wise memmove: copy forward if dest < src, backward if
 * dest > src, skip entirely if dest == src or count is zero */
776 cmplw cr1,r3,r4
777 addi r0,r5,3
778 srwi. r0,r0,2 /* word count, rounded up */
779 beq cr1,4f /* In place copy is not necessary */
780 beq 7f /* Protect against 0 count */
781 mtctr r0
782 bge cr1,2f
783
784 la r8,-4(r4)
785 la r7,-4(r3)
786 1: lwzu r0,4(r8)
787 stwu r0,4(r7)
788 bdnz 1b
789 b 4f
790
791 2: slwi r0,r0,2
792 add r8,r4,r0
793 add r7,r3,r0
794 3: lwzu r0,-4(r8)
795 stwu r0,-4(r7)
796 bdnz 3b
797 #endif
798 /*
799 * Now flush the cache: note that we must start from a cache aligned
800 * address. Otherwise we might miss one cache line.
801 */
802 4: cmpwi r6,0
803 add r5,r3,r5
804 beq 7f /* Always flush prefetch queue in any case */
805 subi r0,r6,1
806 andc r3,r3,r0 /* align start down to a cache line */
807 mr r4,r3
808 5: dcbst 0,r4
809 add r4,r4,r6
810 cmplw r4,r5
811 blt 5b
812 sync /* Wait for all dcbst to complete on bus */
813 mr r4,r3
814 6: icbi 0,r4
815 add r4,r4,r6
816 cmplw r4,r5
817 blt 6b
818 7: sync /* Wait for all icbi to complete on bus */
819 isync
820
821 /*
822 * We are done. Do not return, instead branch to second part of board
823 * initialization, now running from RAM.
824 */
825 addi r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
826 mtlr r0
827 blr
828
/*
 * in_ram - continuation of relocate_code, now executing from RAM.
 * Adjusts GOT2 entries and the fixup table by the relocation
 * offset (r11), clears BSS, then calls board_init_r(gd, dest).
 */
829 in_ram:
830 #ifdef CONFIG_ECC
831 bl board_init_ecc
832 #endif
833 /*
834 * Relocation Function, r14 point to got2+0x8000
835 *
836 * Adjust got2 pointers, no need to check for 0, this code
837 * already puts a few entries in the table.
838 */
839 li r0,__got2_entries@sectoff@l
840 la r3,GOT(_GOT2_TABLE_)
841 lwz r11,GOT(_GOT2_TABLE_)
842 mtctr r0
843 sub r11,r3,r11 /* r11 = relocation offset */
844 addi r3,r3,-4
845 1: lwzu r0,4(r3)
846 add r0,r0,r11
847 stw r0,0(r3)
848 bdnz 1b
849
850 /*
851 * Now adjust the fixups and the pointers to the fixups
852 * in case we need to move ourselves again.
853 */
854 2: li r0,__fixup_entries@sectoff@l
855 lwz r3,GOT(_FIXUP_TABLE_)
856 cmpwi r0,0
857 mtctr r0
858 addi r3,r3,-4
859 beq 4f
860 3: lwzu r4,4(r3)
861 lwzux r0,r4,r11
862 add r0,r0,r11
/* NOTE(review): storing r10 (destination address) into the fixup
 * table entry looks suspicious — the other table update stores the
 * adjusted value; verify against u-boot mainline before changing. */
863 stw r10,0(r3)
864 stw r0,0(r4)
865 bdnz 3b
866 4:
867 /* clear_bss: */
868 /*
869 * Now clear BSS segment
870 */
871 lwz r3,GOT(__bss_start)
872 lwz r4,GOT(_end)
873
874 cmplw 0, r3, r4
875 beq 6f /* empty BSS - nothing to clear */
876
877 li r0, 0
878 5:
879 stw r0, 0(r3)
880 addi r3, r3, 4
881 cmplw 0, r3, r4
882 bne 5b
883 6:
884 mr r3, r9 /* Init Data pointer */
885 mr r4, r10 /* Destination Address */
886 bl board_init_r
887
888 /* not reached - end relocate_code */
889 /*-----------------------------------------------------------------------*/
890
891 /*
892 * Copy exception vector code to low memory
893 *
894 * r3: dest_addr
895 * r7: source address, r8: end address, r9: target address
896 */
897 .globl trap_init
898 trap_init:
899 lwz r7, GOT(_start)
900 lwz r8, GOT(_end_of_vectors)
901
902 li r9, 0x100 /* reset vector always at 0x100 */
903
904 cmplw 0, r7, r8
905 bgelr /* return if r7>=r8 - just in case */
906
907 mflr r4 /* save link register */
/* copy the whole vector area word by word down to 0x100 */
908 1:
909 lwz r0, 0(r7)
910 stw r0, 0(r9)
911 addi r7, r7, 4
912 addi r9, r9, 4
913 cmplw 0, r7, r8
914 bne 1b
915
916 /*
917 * relocate `hdlr' and `int_return' entries
918 */
/* walk the (handler, int_return) word pairs that follow each
 * vector stub, adding dest_addr (r3) via trap_reloc */
919 li r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
920 li r8, Alignment - _start + EXC_OFF_SYS_RESET
921 2:
922 bl trap_reloc
923 addi r7, r7, 0x100 /* next exception vector */
924 cmplw 0, r7, r8
925 blt 2b
926
927 li r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
928 bl trap_reloc
929
930 li r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
931 bl trap_reloc
932
933 li r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
934 li r8, SystemCall - _start + EXC_OFF_SYS_RESET
935 3:
936 bl trap_reloc
937 addi r7, r7, 0x100 /* next exception vector */
938 cmplw 0, r7, r8
939 blt 3b
940
941 li r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
942 li r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
943 4:
944 bl trap_reloc
945 addi r7, r7, 0x100 /* next exception vector */
946 cmplw 0, r7, r8
947 blt 4b
948
949 /* enable exceptions from RAM vectors */
950 mfmsr r7
951 li r8,MSR_IP
952 andc r7,r7,r8 /* clear IP: vectors at low memory */
953 ori r7,r7,MSR_ME /* Enable Machine Check */
954 mtmsr r7
955
956 mtlr r4 /* restore link register */
957 blr
958
959 /*
960 * Function: relocate entries for one exception vector
961 */
/* In: r7 = address of the (hdlr, int_return) word pair,
 *     r3 = dest_addr to add to both.  Clobbers r0. */
962 trap_reloc:
963 lwz r0, 0(r7) /* hdlr ... */
964 add r0, r0, r3 /* ... += dest_addr */
965 stw r0, 0(r7)
966
967 lwz r0, 4(r7) /* int_return ... */
968 add r0, r0, r3 /* ... += dest_addr */
969 stw r0, 4(r7)
970
971 sync
972 isync
973
974 blr
975
/*
 * enable_ext_addr - write HID0 with the extended-addressing bits
 * (HIGH_BAT_EN: BATs 4-7, XBSEN: extended block size, XAEN:
 * extended addressing).  Clobbers: r0.
 *
 * Fix: the original began with `mfspr r0, HID0` whose result was
 * immediately overwritten by the following lis — a dead read,
 * removed here (behavior is unchanged: HID0 is still set to
 * exactly these three bits).
 * NOTE(review): the dead read suggests a read-modify-write may
 * have been intended (preserving other HID0 bits); both callers
 * in this file run before the caches are enabled, so the current
 * overwrite semantics appear to be what the boot flow relies on —
 * confirm before converting to an OR.
 */
	.globl enable_ext_addr
enable_ext_addr:
	lis	r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@h
	ori	r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@l
	mtspr	HID0, r0
	sync
	isync
	blr
985
986 #if (CFG_CCSRBAR_DEFAULT != CFG_CCSRBAR)
/*
 * setup_ccsrbar - move CCSRBAR from its power-on default to the
 * configured address.  The loads after the store force the write
 * to complete and fetch from the old/new windows before CCSR
 * registers are used again.  Clobbers: r3, r4, r5, r6.
 */
987 .globl setup_ccsrbar
988 setup_ccsrbar:
989 /* Special sequence needed to update CCSRBAR itself */
990 lis r4, CFG_CCSRBAR_DEFAULT@h
991 ori r4, r4, CFG_CCSRBAR_DEFAULT@l
992
993 lis r5, CFG_CCSRBAR@h
994 ori r5, r5, CFG_CCSRBAR@l
995 srwi r6,r5,12 /* CCSRBAR holds the address >> 12 */
996 stw r6, 0(r4) /* write via the old window */
997 isync
998
999 lis r5, 0xffff
1000 ori r5,r5,0xf000
1001 lwz r5, 0(r5) /* dummy read to flush the store */
1002 isync
1003
1004 lis r3, CFG_CCSRBAR@h
1005 lwz r5, CFG_CCSRBAR@l(r3) /* read back via the new window */
1006 isync
1007
1008 blr
1009 #endif
1010
1011 #ifdef CFG_INIT_RAM_LOCK
/*
 * lock_ram_in_cache - create the early "stack in cache" region:
 * dcbz every 32-byte line of [CFG_INIT_RAM_ADDR, CFG_INIT_RAM_END)
 * to allocate it in the data cache, then lock the data cache via
 * HID0 (bit 0x1000).  Clobbers: r0, r2, r3, CTR.
 */
1012 lock_ram_in_cache:
1013 /* Allocate Initial RAM in data cache.
1014 */
1015 lis r3, (CFG_INIT_RAM_ADDR & ~31)@h
1016 ori r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l
1017 li r2, ((CFG_INIT_RAM_END & ~31) + \
1018 (CFG_INIT_RAM_ADDR & 31) + 31) / 32
1019 mtctr r2
1020 1:
1021 dcbz r0, r3
1022 addi r3, r3, 32 /* next cache line */
1023 bdnz 1b
1024 #if 1
1025 /* Lock the data cache */
1026 mfspr r0, HID0
1027 ori r0, r0, 0x1000
1028 sync
1029 mtspr HID0, r0
1030 sync
1031 blr
1032 #endif
/* alternative (disabled): lock only the first cache way via LDSTCR */
1033 #if 0
1034 /* Lock the first way of the data cache */
1035 mfspr r0, LDSTCR
1036 ori r0, r0, 0x0080
1037 #if defined(CONFIG_ALTIVEC)
1038 dssall
1039 #endif
1040 sync
1041 mtspr LDSTCR, r0
1042 sync
1043 isync
1044 blr
1045 #endif
1046
/*
 * unlock_ram_in_cache - inverse of the above: invalidate the
 * init-RAM lines (icbi loop), then clear the HID0 lock bit
 * (0x1000) and set the invalidate bit (0x0400).
 * Clobbers: r0, r2, r3, CTR.
 */
1047 .globl unlock_ram_in_cache
1048 unlock_ram_in_cache:
1049 /* invalidate the INIT_RAM section */
1050 lis r3, (CFG_INIT_RAM_ADDR & ~31)@h
1051 ori r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l
1052 li r2, ((CFG_INIT_RAM_END & ~31) + \
1053 (CFG_INIT_RAM_ADDR & 31) + 31) / 32
1054 mtctr r2
1055 1: icbi r0, r3
1056 addi r3, r3, 32
1057 bdnz 1b
1058 sync /* Wait for all icbi to complete on bus */
1059 isync
1060 #if 1
1061 /* Unlock the data cache and invalidate it */
1062 mfspr r0, HID0
1063 li r3,0x1000
1064 andc r0,r0,r3
1065 li r3,0x0400
1066 or r0,r0,r3
1067 sync
1068 mtspr HID0, r0
1069 sync
1070 blr
1071 #endif
1072 #if 0
1073 /* Unlock the first way of the data cache */
1074 mfspr r0, LDSTCR
1075 li r3,0x0080
1076 andc r0,r0,r3
1077 #ifdef CONFIG_ALTIVEC
1078 dssall
1079 #endif
1080 sync
1081 mtspr LDSTCR, r0
1082 sync
1083 isync
1084 li r3,0x0400
1085 or r0,r0,r3
1086 sync
1087 mtspr HID0, r0
1088 sync
1089 blr
1090 #endif
1091 #endif
1092
1093 /* If this is a multi-cpu system then we need to handle the
1094 * 2nd cpu. The assumption is that the 2nd cpu is being
1095 * held in boot holdoff mode until the 1st cpu unlocks it
1096 * from Linux. We'll do some basic cpu init and then pass
1097 * it to the Linux Reset Vector.
1098 * Sri: Much of this initialization is not required. Linux
1099 * rewrites the bats, and the sprs and also enables the L1 cache.
1100 */
1101 #if (CONFIG_NUM_CPUS > 1)
1102 .globl secondary_cpu_setup
1103 secondary_cpu_setup:
1104 /* Do only core setup on all cores except cpu0 */
1105 bl invalidate_bats
1106 sync
1107 bl enable_ext_addr
1108
1109 #ifdef CFG_L2
1110 /* init the L2 cache */
1111 addis r3, r0, L2_INIT@h
1112 ori r3, r3, L2_INIT@l
1113 sync
1114 mtspr l2cr, r3
1115 #ifdef CONFIG_ALTIVEC
1116 dssall
1117 #endif
1118 /* invalidate the L2 cache */
1119 bl l2cache_invalidate
1120 sync
1121 #endif
1122
1123 /* enable and invalidate the data cache */
1124 bl dcache_enable
1125 sync
1126
1127 /* enable and invalidate the instruction cache*/
1128 bl icache_enable
1129 sync
1130
/* 0x0400 in the upper half = HID0[TBEN]: enable the time base */
1131 /* TBEN in HID0 */
1132 mfspr r4, HID0
1133 oris r4, r4, 0x0400
1134 mtspr HID0, r4
1135 sync
1136 isync
1137
1138 /* MCP|SYNCBE|ABE in HID1 */
1139 mfspr r4, HID1
1140 oris r4, r4, 0x8000
1141 ori r4, r4, 0x0C00
1142 mtspr HID1, r4
1143 sync
1144 isync
1145
/* park the secondary core at the Linux reset vector */
1146 lis r3, CONFIG_LINUX_RESET_VEC@h
1147 ori r3, r3, CONFIG_LINUX_RESET_VEC@l
1148 mtlr r3
1149 blr
1150
1151 /* Never Returns, Running in Linux Now */
1152 #endif