cpu/mpc86xx/start.S
1 /*
2 * Copyright 2004, 2007 Freescale Semiconductor.
3 * Srikanth Srinivasan <srikanth.srinivaan@freescale.com>
4 *
5 * See file CREDITS for list of people who contributed to this
6 * project.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24 /* U-Boot - Startup Code for 86xx PowerPC based Embedded Boards
25 *
26 *
27 * The processor starts at 0xfff00100 and the code is executed
28  * from flash. The code is organized to be at another address in
29  * memory, but that is fine as long as we don't jump around before
30  * relocating. board_init lies at a fairly high address and when
31  * the cpu has jumped there, everything is OK.
32 */
33 #include <config.h>
34 #include <mpc86xx.h>
35 #include <version.h>
36
37 #include <ppc_asm.tmpl>
38 #include <ppc_defs.h>
39
40 #include <asm/cache.h>
41 #include <asm/mmu.h>
42
43 #ifndef CONFIG_IDENT_STRING
44 #define CONFIG_IDENT_STRING ""
45 #endif
46
47 /*
48 * Need MSR_DR | MSR_IR enabled to access I/O (printf) in exceptions
49 */
50
51 /*
52 * Set up GOT: Global Offset Table
53 *
54 * Use r14 to access the GOT
55 */
56 START_GOT
57 GOT_ENTRY(_GOT2_TABLE_)
58 GOT_ENTRY(_FIXUP_TABLE_)
59
60 GOT_ENTRY(_start)
61 GOT_ENTRY(_start_of_vectors)
62 GOT_ENTRY(_end_of_vectors)
63 GOT_ENTRY(transfer_to_handler)
64
65 GOT_ENTRY(__init_end)
66 GOT_ENTRY(_end)
67 GOT_ENTRY(__bss_start)
68 END_GOT
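/*
 * Note: GET_GOT (defined in ppc_asm.tmpl) is what initializes r14,
 * after which GOT(name) resolves to an r14-relative load of the
 * entry created above; relocate_code/in_ram later adjust r14 and
 * the entries themselves so the table works both before and after
 * relocation.
 */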
69
70 /*
71  * r3 - 1st arg to board_init_f(): boot flag
72  * (r3 is loaded from r21 just before the call)
73 */
74 .text
75 .long 0x27051956 /* U-Boot Magic Number */
76 .globl version_string
77 version_string:
78 .ascii U_BOOT_VERSION
79 .ascii " (", __DATE__, " - ", __TIME__, ")"
80 .ascii CONFIG_IDENT_STRING, "\0"
81
82 . = EXC_OFF_SYS_RESET
83 .globl _start
84 _start:
85 li r21, BOOTFLAG_COLD /* Normal Power-On: Boot from FLASH */
86 b boot_cold
87 sync
88
89 . = EXC_OFF_SYS_RESET + 0x10
90
91 .globl _start_warm
92 _start_warm:
93 li r21, BOOTFLAG_WARM /* Software reboot */
94 b boot_warm
95 sync
96
97  /* the boot code is located after the exception vectors */
98
99 .globl _start_of_vectors
100 _start_of_vectors:
101
102 /* Machine check */
103 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
104
105 /* Data Storage exception. */
106 STD_EXCEPTION(0x300, DataStorage, UnknownException)
107
108 /* Instruction Storage exception. */
109 STD_EXCEPTION(0x400, InstStorage, UnknownException)
110
111 /* External Interrupt exception. */
112 STD_EXCEPTION(0x500, ExtInterrupt, external_interrupt)
113
114 /* Alignment exception. */
115 . = 0x600
116 Alignment:
117 EXCEPTION_PROLOG(SRR0, SRR1)
118 mfspr r4,DAR
119 stw r4,_DAR(r21)
120 mfspr r5,DSISR
121 stw r5,_DSISR(r21)
122 addi r3,r1,STACK_FRAME_OVERHEAD
123 li r20,MSR_KERNEL
124 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
125 lwz r6,GOT(transfer_to_handler)
126 mtlr r6
127 blrl
128 .L_Alignment:
129 .long AlignmentException - _start + EXC_OFF_SYS_RESET
130 .long int_return - _start + EXC_OFF_SYS_RESET
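/*
 * The two .long words at .L_Alignment are consumed by
 * transfer_to_handler, which picks them up through the link
 * register left by the blrl above: the first is the address of the
 * C-level handler, the second the address of int_return. The
 * STD_EXCEPTION entries presumably follow the same convention.
 */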
131
132 /* Program check exception */
133 . = 0x700
134 ProgramCheck:
135 EXCEPTION_PROLOG(SRR0, SRR1)
136 addi r3,r1,STACK_FRAME_OVERHEAD
137 li r20,MSR_KERNEL
138 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
139 lwz r6,GOT(transfer_to_handler)
140 mtlr r6
141 blrl
142 .L_ProgramCheck:
143 .long ProgramCheckException - _start + EXC_OFF_SYS_RESET
144 .long int_return - _start + EXC_OFF_SYS_RESET
145
146 STD_EXCEPTION(0x800, FPUnavailable, UnknownException)
147
148 /* I guess we could implement decrementer, and may have
149 * to someday for timekeeping.
150 */
151 STD_EXCEPTION(0x900, Decrementer, timer_interrupt)
152 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
153 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
154 STD_EXCEPTION(0xc00, SystemCall, UnknownException)
155 STD_EXCEPTION(0xd00, SingleStep, UnknownException)
156 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
157 STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
158 STD_EXCEPTION(0x1000, SoftEmu, SoftEmuException)
159 STD_EXCEPTION(0x1100, InstructionTLBMiss, UnknownException)
160 STD_EXCEPTION(0x1200, DataTLBMiss, UnknownException)
161 STD_EXCEPTION(0x1300, InstructionTLBError, UnknownException)
162 STD_EXCEPTION(0x1400, DataTLBError, UnknownException)
163 STD_EXCEPTION(0x1500, Reserved5, UnknownException)
164 STD_EXCEPTION(0x1600, Reserved6, UnknownException)
165 STD_EXCEPTION(0x1700, Reserved7, UnknownException)
166 STD_EXCEPTION(0x1800, Reserved8, UnknownException)
167 STD_EXCEPTION(0x1900, Reserved9, UnknownException)
168 STD_EXCEPTION(0x1a00, ReservedA, UnknownException)
169 STD_EXCEPTION(0x1b00, ReservedB, UnknownException)
170 STD_EXCEPTION(0x1c00, DataBreakpoint, UnknownException)
171 STD_EXCEPTION(0x1d00, InstructionBreakpoint, UnknownException)
172 STD_EXCEPTION(0x1e00, PeripheralBreakpoint, UnknownException)
173 STD_EXCEPTION(0x1f00, DevPortBreakpoint, UnknownException)
174
175 .globl _end_of_vectors
176 _end_of_vectors:
177
178 . = 0x2000
179
180 boot_cold:
181 boot_warm:
182
183 /* If this is a multi-core system we need to check which cpu
184  * this is; if it is not cpu 0, send the cpu to the linux reset
185  * vector. */
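/* What is assumed to be the core-ID bit of MSSCR0 (mask 0x0020) is
 * shifted down to the low-order bit and saved in PIR so each core
 * carries its own number; the condition codes set by the andi.
 * below then send core 0 to label 1: while the second core falls
 * through into secondary_cpu_setup. */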
186 #if (CONFIG_NUM_CPUS > 1)
187 mfspr r0, MSSCR0
188 andi. r0, r0, 0x0020
189 rlwinm r0,r0,27,31,31
190 mtspr PIR, r0
191 beq 1f
192
193 bl secondary_cpu_setup
194 #endif
195
196 1:
197 #ifdef CONFIG_SYS_RAMBOOT
198 /* disable everything */
199 li r0, 0
200 mtspr HID0, r0
201 sync
202 mtmsr 0
203 #endif
204
205 /* Invalidate BATs */
206 bl invalidate_bats
207 sync
208 /* Invalidate all of TLB before MMU turn on */
209 bl clear_tlbs
210 sync
211
212 #ifdef CONFIG_SYS_L2
213 /* init the L2 cache */
214 lis r3, L2_INIT@h
215 ori r3, r3, L2_INIT@l
216 mtspr l2cr, r3
217 /* invalidate the L2 cache */
218 bl l2cache_invalidate
219 sync
220 #endif
221
222 /*
223 * Calculate absolute address in FLASH and jump there
224 *------------------------------------------------------*/
225 lis r3, CONFIG_SYS_MONITOR_BASE@h
226 ori r3, r3, CONFIG_SYS_MONITOR_BASE@l
227 addi r3, r3, in_flash - _start + EXC_OFF_SYS_RESET
228 mtlr r3
229 blr
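/* The blr above continues at the link-time flash address of
 * in_flash: CONFIG_SYS_MONITOR_BASE plus the offset of in_flash from
 * _start (EXC_OFF_SYS_RESET is added because _start itself sits at
 * that offset in the image). From here on we run at the address the
 * monitor was linked for, presumably independent of which alias of
 * the boot flash the reset vector was fetched through. */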
230
231 in_flash:
232 /* let the C-code set up the rest */
233 /* */
234 /* Be careful to keep code relocatable ! */
235 /*------------------------------------------------------*/
236 /* perform low-level init */
237
238 /* enable extended addressing */
239 bl enable_ext_addr
240
241 /* setup the bats */
242 bl early_bats
243
244 /*
245 * Cache must be enabled here for stack-in-cache trick.
246 * This means we need to enable the BATS.
247 * Cache should be turned on after BATs, since by default
248 * everything is write-through.
249 */
250
251 /* enable address translation */
252 bl enable_addr_trans
253 sync
254
255 /* enable and invalidate the data cache */
256 /* bl l1dcache_enable */
257 bl dcache_enable
258 sync
259
260 #if 1
261 bl icache_enable
262 #endif
263
264 #ifdef CONFIG_SYS_INIT_RAM_LOCK
265 bl lock_ram_in_cache
266 sync
267 #endif
268
269 /* set up the stack pointer in our newly created
270 * cache-ram (r1) */
271 lis r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@h
272 ori r1, r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@l
273
274 li r0, 0 /* Make room for stack frame header and */
275 stwu r0, -4(r1) /* clear final stack frame so that */
276 stwu r0, -4(r1) /* stack backtraces terminate cleanly */
277
278 GET_GOT /* initialize GOT access */
279
280 /* setup the rest of the bats */
281 bl setup_bats
282 sync
283
284 #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR)
285 /* setup ccsrbar */
286 bl setup_ccsrbar
287 #endif
288
289 /* run low-level CPU init code (from Flash) */
290 bl cpu_init_f
291 sync
292
293 #ifdef RUN_DIAG
294
295 /* Load PX_AUX register address in r4 */
296 lis r4, 0xf810
297 ori r4, r4, 0x6
298 /* Load contents of PX_AUX in r3 bits 24 to 31*/
299 lbz r3, 0(r4)
300
301 /* Mask and obtain the bit in r3 */
302 rlwinm. r3, r3, 0, 24, 24
303 /* If not zero, jump and continue with u-boot */
304 bne diag_done
305
306 /* Load back contents of PX_AUX in r3 bits 24 to 31 */
307 lbz r3, 0(r4)
308 /* Set the MSB of the register value */
309 ori r3, r3, 0x80
310 /* Write value in r3 back to PX_AUX */
311 stb r3, 0(r4)
312
313 /* Get the address to jump to in r3*/
314 lis r3, CONFIG_SYS_DIAG_ADDR@h
315 ori r3, r3, CONFIG_SYS_DIAG_ADDR@l
316
317 /* Load the LR with the branch address */
318 mtlr r3
319
320 /* Branch to diagnostic */
321 blr
322
323 diag_done:
324 #endif
325
326 /* bl l2cache_enable */
327 mr r3, r21
328
329 /* r3: BOOTFLAG */
330 /* run 1st part of board init code (from Flash) */
331 bl board_init_f
332 sync
333
334 /* NOTREACHED */
335
336 .globl invalidate_bats
337 invalidate_bats:
338
339 li r0, 0
340 /* invalidate BATs */
341 mtspr IBAT0U, r0
342 mtspr IBAT1U, r0
343 mtspr IBAT2U, r0
344 mtspr IBAT3U, r0
345 mtspr IBAT4U, r0
346 mtspr IBAT5U, r0
347 mtspr IBAT6U, r0
348 mtspr IBAT7U, r0
349
350 isync
351 mtspr DBAT0U, r0
352 mtspr DBAT1U, r0
353 mtspr DBAT2U, r0
354 mtspr DBAT3U, r0
355 mtspr DBAT4U, r0
356 mtspr DBAT5U, r0
357 mtspr DBAT6U, r0
358 mtspr DBAT7U, r0
359
360 isync
361 sync
362 blr
363
364 /*
365 * early_bats:
366 *
367 * Set up bats needed early on - this is usually the BAT for the
368 * stack-in-cache and the Flash
369 */
370 .globl early_bats
371 early_bats:
372 /* IBAT 5 */
373 lis r4, CONFIG_SYS_IBAT5L@h
374 ori r4, r4, CONFIG_SYS_IBAT5L@l
375 lis r3, CONFIG_SYS_IBAT5U@h
376 ori r3, r3, CONFIG_SYS_IBAT5U@l
377 mtspr IBAT5L, r4
378 mtspr IBAT5U, r3
379 isync
380
381 /* DBAT 5 */
382 lis r4, CONFIG_SYS_DBAT5L@h
383 ori r4, r4, CONFIG_SYS_DBAT5L@l
384 lis r3, CONFIG_SYS_DBAT5U@h
385 ori r3, r3, CONFIG_SYS_DBAT5U@l
386 mtspr DBAT5L, r4
387 mtspr DBAT5U, r3
388 isync
389
390 /* IBAT 6 */
391 lis r4, CONFIG_SYS_IBAT6L@h
392 ori r4, r4, CONFIG_SYS_IBAT6L@l
393 lis r3, CONFIG_SYS_IBAT6U@h
394 ori r3, r3, CONFIG_SYS_IBAT6U@l
395 mtspr IBAT6L, r4
396 mtspr IBAT6U, r3
397 isync
398
399 /* DBAT 6 */
400 lis r4, CONFIG_SYS_DBAT6L@h
401 ori r4, r4, CONFIG_SYS_DBAT6L@l
402 lis r3, CONFIG_SYS_DBAT6U@h
403 ori r3, r3, CONFIG_SYS_DBAT6U@l
404 mtspr DBAT6L, r4
405 mtspr DBAT6U, r3
406 isync
407 blr
408
409 .globl clear_tlbs
410 clear_tlbs:
411 addis r3, 0, 0x0000
412 addis r5, 0, 0x4
413 isync
414 tlblp:
415 tlbie r3
416 sync
417 addi r3, r3, 0x1000
418 cmp 0, 0, r3, r5
419 blt tlblp
420 blr
421
422 .globl enable_addr_trans
423 enable_addr_trans:
424 /* enable address translation */
425 mfmsr r5
426 ori r5, r5, (MSR_IR | MSR_DR)
427 mtmsr r5
428 isync
429 blr
430
431 .globl disable_addr_trans
432 disable_addr_trans:
433 /* disable address translation */
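/* Note: translation is switched off with an rfi rather than mtmsr;
 * SRR0 gets the caller's return address and SRR1 the MSR value with
 * IR/DR cleared, so the rfi returns to the caller and disables
 * translation in one step. */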
434 mflr r4
435 mfmsr r3
436 andi. r0, r3, (MSR_IR | MSR_DR)
437 beqlr
438 andc r3, r3, r0
439 mtspr SRR0, r4
440 mtspr SRR1, r3
441 rfi
442
443 /*
444 * This code finishes saving the registers to the exception frame
445 * and jumps to the appropriate handler for the exception.
446 * Register r21 is pointer into trap frame, r1 has new stack pointer.
447 */
448 .globl transfer_to_handler
449 transfer_to_handler:
450 stw r22,_NIP(r21)
451 lis r22,MSR_POW@h
452 andc r23,r23,r22
453 stw r23,_MSR(r21)
454 SAVE_GPR(7, r21)
455 SAVE_4GPRS(8, r21)
456 SAVE_8GPRS(12, r21)
457 SAVE_8GPRS(24, r21)
458 mflr r23
459 andi. r24,r23,0x3f00 /* get vector offset */
460 stw r24,TRAP(r21)
461 li r22,0
462 stw r22,RESULT(r21)
463 mtspr SPRG2,r22 /* r1 is now kernel sp */
464 lwz r24,0(r23) /* virtual address of handler */
465 lwz r23,4(r23) /* where to go when done */
466 mtspr SRR0,r24
467 mtspr SRR1,r20
468 mtlr r23
469 SYNC
470 rfi /* jump to handler, enable MMU */
471
472 int_return:
473 mfmsr r28 /* Disable interrupts */
474 li r4,0
475 ori r4,r4,MSR_EE
476 andc r28,r28,r4
477 SYNC /* Some chip revs need this... */
478 mtmsr r28
479 SYNC
480 lwz r2,_CTR(r1)
481 lwz r0,_LINK(r1)
482 mtctr r2
483 mtlr r0
484 lwz r2,_XER(r1)
485 lwz r0,_CCR(r1)
486 mtspr XER,r2
487 mtcrf 0xFF,r0
488 REST_10GPRS(3, r1)
489 REST_10GPRS(13, r1)
490 REST_8GPRS(23, r1)
491 REST_GPR(31, r1)
492 lwz r2,_NIP(r1) /* Restore environment */
493 lwz r0,_MSR(r1)
494 mtspr SRR0,r2
495 mtspr SRR1,r0
496 lwz r0,GPR0(r1)
497 lwz r2,GPR2(r1)
498 lwz r1,GPR1(r1)
499 SYNC
500 rfi
501
502 .globl dc_read
503 dc_read:
504 blr
505
506 .globl get_pvr
507 get_pvr:
508 mfspr r3, PVR
509 blr
510
511 .globl get_svr
512 get_svr:
513 mfspr r3, SVR
514 blr
515
516
517 /*
518 * Function: in8
519 * Description: Input 8 bits
520 */
521 .globl in8
522 in8:
523 lbz r3,0x0000(r3)
524 blr
525
526 /*
527 * Function: out8
528 * Description: Output 8 bits
529 */
530 .globl out8
531 out8:
532 stb r4,0x0000(r3)
533 blr
534
535 /*
536 * Function: out16
537 * Description: Output 16 bits
538 */
539 .globl out16
540 out16:
541 sth r4,0x0000(r3)
542 blr
543
544 /*
545 * Function: out16r
546 * Description: Byte reverse and output 16 bits
547 */
548 .globl out16r
549 out16r:
550 sthbrx r4,r0,r3
551 blr
552
553 /*
554 * Function: out32
555 * Description: Output 32 bits
556 */
557 .globl out32
558 out32:
559 stw r4,0x0000(r3)
560 blr
561
562 /*
563 * Function: out32r
564 * Description: Byte reverse and output 32 bits
565 */
566 .globl out32r
567 out32r:
568 stwbrx r4,r0,r3
569 blr
570
571 /*
572 * Function: in16
573 * Description: Input 16 bits
574 */
575 .globl in16
576 in16:
577 lhz r3,0x0000(r3)
578 blr
579
580 /*
581 * Function: in16r
582 * Description: Input 16 bits and byte reverse
583 */
584 .globl in16r
585 in16r:
586 lhbrx r3,r0,r3
587 blr
588
589 /*
590 * Function: in32
591 * Description: Input 32 bits
592 */
593 .globl in32
594 in32:
595 lwz r3,0x0000(r3)
596 blr
597
598 /*
599 * Function: in32r
600 * Description: Input 32 bits and byte reverse
601 */
602 .globl in32r
603 in32r:
604 lwbrx r3,r0,r3
605 blr
606
607 /*
608 * void relocate_code (addr_sp, gd, addr_moni)
609 *
610 * This "function" does not return, instead it continues in RAM
611 * after relocating the monitor code.
612 *
613  * On entry: r3 = addr_sp, r4 = gd, r5 = addr_moni (dest).
614  * For the copy/flush loops set up below:
615  * r3 = dest, r4 = src, r5 = length in bytes,
616  * r6 = cache line size
617 */
618 .globl relocate_code
619 relocate_code:
620
621 mr r1, r3 /* Set new stack pointer */
622 mr r9, r4 /* Save copy of Global Data pointer */
623 mr r10, r5 /* Save copy of Destination Address */
624
625 mr r3, r5 /* Destination Address */
626 lis r4, CONFIG_SYS_MONITOR_BASE@h /* Source Address */
627 ori r4, r4, CONFIG_SYS_MONITOR_BASE@l
628 lwz r5, GOT(__init_end)
629 sub r5, r5, r4
630 li r6, CONFIG_SYS_CACHELINE_SIZE /* Cache Line Size */
631
632 /*
633 * Fix GOT pointer:
634 *
635 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
636 *
637 * Offset:
638 */
639 sub r15, r10, r4
640
641 /* First our own GOT */
642 add r14, r14, r15
643 /* then the one used by the C code */
644 add r30, r30, r15
645
646 /*
647 * Now relocate code
648 */
649 cmplw cr1,r3,r4
650 addi r0,r5,3
651 srwi. r0,r0,2
652 beq cr1,4f /* In place copy is not necessary */
653 beq 7f /* Protect against 0 count */
654 mtctr r0
655 bge cr1,2f
656
657 la r8,-4(r4)
658 la r7,-4(r3)
659 1: lwzu r0,4(r8)
660 stwu r0,4(r7)
661 bdnz 1b
662 b 4f
663
664 2: slwi r0,r0,2
665 add r8,r4,r0
666 add r7,r3,r0
667 3: lwzu r0,-4(r8)
668 stwu r0,-4(r7)
669 bdnz 3b
670 /*
671 * Now flush the cache: note that we must start from a cache aligned
672 * address. Otherwise we might miss one cache line.
673 */
674 4: cmpwi r6,0
675 add r5,r3,r5
676 beq 7f /* Always flush prefetch queue in any case */
677 subi r0,r6,1
678 andc r3,r3,r0
679 mr r4,r3
680 5: dcbst 0,r4
681 add r4,r4,r6
682 cmplw r4,r5
683 blt 5b
684 sync /* Wait for all dcbst to complete on bus */
685 mr r4,r3
686 6: icbi 0,r4
687 add r4,r4,r6
688 cmplw r4,r5
689 blt 6b
690 7: sync /* Wait for all icbi to complete on bus */
691 isync
692
693 /*
694 * We are done. Do not return, instead branch to second part of board
695 * initialization, now running from RAM.
696 */
697 addi r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
698 mtlr r0
699 blr
700
701 in_ram:
702 /*
703  * Relocation Function, r14 points to got2+0x8000
704  *
705  * Adjust got2 pointers; no need to check for 0 since this code
706  * already puts a few entries in the table.
707 */
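/* The la below yields the run-time address of _GOT2_TABLE_ (r14 has
 * already been adjusted in relocate_code), while the lwz fetches the
 * link-time address still stored in its GOT entry; their difference
 * in r11 is the relocation offset added to every got2 entry. */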
708 li r0,__got2_entries@sectoff@l
709 la r3,GOT(_GOT2_TABLE_)
710 lwz r11,GOT(_GOT2_TABLE_)
711 mtctr r0
712 sub r11,r3,r11
713 addi r3,r3,-4
714 1: lwzu r0,4(r3)
715 add r0,r0,r11
716 stw r0,0(r3)
717 bdnz 1b
718
719 /*
720 * Now adjust the fixups and the pointers to the fixups
721 * in case we need to move ourselves again.
722 */
723 2: li r0,__fixup_entries@sectoff@l
724 lwz r3,GOT(_FIXUP_TABLE_)
725 cmpwi r0,0
726 mtctr r0
727 addi r3,r3,-4
728 beq 4f
729 3: lwzu r4,4(r3)
730 lwzux r0,r4,r11
731 add r0,r0,r11
732 stw r10,0(r3)
733 stw r0,0(r4)
734 bdnz 3b
735 4:
736 /* clear_bss: */
737 /*
738 * Now clear BSS segment
739 */
740 lwz r3,GOT(__bss_start)
741 lwz r4,GOT(_end)
742
743 cmplw 0, r3, r4
744 beq 6f
745
746 li r0, 0
747 5:
748 stw r0, 0(r3)
749 addi r3, r3, 4
750 cmplw 0, r3, r4
751 bne 5b
752 6:
753 mr r3, r9 /* Global Data pointer */
754 mr r4, r10 /* Destination Address */
755 bl board_init_r
756
757 /* not reached - end relocate_code */
758 /*-----------------------------------------------------------------------*/
759
760 /*
761 * Copy exception vector code to low memory
762 *
763 * r3: dest_addr
764 * r7: source address, r8: end address, r9: target address
765 */
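/* The vector code is copied down so it can be used once MSR[IP] is
 * cleared at the end of this routine; trap_reloc then adds dest_addr
 * (r3) to the hdlr/int_return words that follow each exception
 * prolog, so they point into the relocated monitor image. */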
766 .globl trap_init
767 trap_init:
768 lwz r7, GOT(_start)
769 lwz r8, GOT(_end_of_vectors)
770
771 li r9, 0x100 /* reset vector always at 0x100 */
772
773 cmplw 0, r7, r8
774 bgelr /* return if r7>=r8 - just in case */
775
776 mflr r4 /* save link register */
777 1:
778 lwz r0, 0(r7)
779 stw r0, 0(r9)
780 addi r7, r7, 4
781 addi r9, r9, 4
782 cmplw 0, r7, r8
783 bne 1b
784
785 /*
786 * relocate `hdlr' and `int_return' entries
787 */
788 li r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
789 li r8, Alignment - _start + EXC_OFF_SYS_RESET
790 2:
791 bl trap_reloc
792 addi r7, r7, 0x100 /* next exception vector */
793 cmplw 0, r7, r8
794 blt 2b
795
796 li r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
797 bl trap_reloc
798
799 li r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
800 bl trap_reloc
801
802 li r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
803 li r8, SystemCall - _start + EXC_OFF_SYS_RESET
804 3:
805 bl trap_reloc
806 addi r7, r7, 0x100 /* next exception vector */
807 cmplw 0, r7, r8
808 blt 3b
809
810 li r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
811 li r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
812 4:
813 bl trap_reloc
814 addi r7, r7, 0x100 /* next exception vector */
815 cmplw 0, r7, r8
816 blt 4b
817
818 /* enable exceptions from RAM vectors */
819 mfmsr r7
820 li r8,MSR_IP
821 andc r7,r7,r8
822 ori r7,r7,MSR_ME /* Enable Machine Check */
823 mtmsr r7
824
825 mtlr r4 /* restore link register */
826 blr
827
828 /*
829 * Function: relocate entries for one exception vector
830 */
831 trap_reloc:
832 lwz r0, 0(r7) /* hdlr ... */
833 add r0, r0, r3 /* ... += dest_addr */
834 stw r0, 0(r7)
835
836 lwz r0, 4(r7) /* int_return ... */
837 add r0, r0, r3 /* ... += dest_addr */
838 stw r0, 4(r7)
839
840 sync
841 isync
842
843 blr
844
845 .globl enable_ext_addr
846 enable_ext_addr:
847 mfspr r0, HID0
848 lis r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@h
849 ori r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@l
850 mtspr HID0, r0
851 sync
852 isync
853 blr
854
855 #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR)
856 .globl setup_ccsrbar
857 setup_ccsrbar:
858 /* Special sequence needed to update CCSRBAR itself */
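/* The new base, right-shifted by 12, is written to CCSRBAR at its
 * old power-on default location; the following dummy load from
 * 0xfffff000 and the load back through the new window are presumably
 * what force the store to complete and the new mapping to take
 * effect before CCSR registers are touched again. */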
859 lis r4, CONFIG_SYS_CCSRBAR_DEFAULT@h
860 ori r4, r4, CONFIG_SYS_CCSRBAR_DEFAULT@l
861
862 lis r5, CONFIG_SYS_CCSRBAR@h
863 ori r5, r5, CONFIG_SYS_CCSRBAR@l
864 srwi r6,r5,12
865 stw r6, 0(r4)
866 isync
867
868 lis r5, 0xffff
869 ori r5,r5,0xf000
870 lwz r5, 0(r5)
871 isync
872
873 lis r3, CONFIG_SYS_CCSRBAR@h
874 lwz r5, CONFIG_SYS_CCSRBAR@l(r3)
875 isync
876
877 blr
878 #endif
879
880 #ifdef CONFIG_SYS_INIT_RAM_LOCK
881 lock_ram_in_cache:
882 /* Allocate Initial RAM in data cache.
883 */
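/* The dcbz loop establishes and zeroes one 32-byte cache line per
 * iteration for the CONFIG_SYS_INIT_RAM region without needing any
 * backing memory, and the HID0 write afterwards locks the data
 * cache, so this area can be used as the early stack / global data
 * area (the "stack-in-cache trick" mentioned further up) until real
 * RAM is available. */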
884 lis r3, (CONFIG_SYS_INIT_RAM_ADDR & ~31)@h
885 ori r3, r3, (CONFIG_SYS_INIT_RAM_ADDR & ~31)@l
886 li r4, ((CONFIG_SYS_INIT_RAM_END & ~31) + \
887 (CONFIG_SYS_INIT_RAM_ADDR & 31) + 31) / 32
888 mtctr r4
889 1:
890 dcbz r0, r3
891 addi r3, r3, 32
892 bdnz 1b
893 #if 1
894 /* Lock the data cache */
895 mfspr r0, HID0
896 ori r0, r0, 0x1000
897 sync
898 mtspr HID0, r0
899 sync
900 blr
901 #endif
902 #if 0
903 /* Lock the first way of the data cache */
904 mfspr r0, LDSTCR
905 ori r0, r0, 0x0080
906 #if defined(CONFIG_ALTIVEC)
907 dssall
908 #endif
909 sync
910 mtspr LDSTCR, r0
911 sync
912 isync
913 blr
914 #endif
915
916 .globl unlock_ram_in_cache
917 unlock_ram_in_cache:
918 /* invalidate the INIT_RAM section */
919 lis r3, (CONFIG_SYS_INIT_RAM_ADDR & ~31)@h
920 ori r3, r3, (CONFIG_SYS_INIT_RAM_ADDR & ~31)@l
921 li r4, ((CONFIG_SYS_INIT_RAM_END & ~31) + \
922 (CONFIG_SYS_INIT_RAM_ADDR & 31) + 31) / 32
923 mtctr r4
924 1: icbi r0, r3
925 addi r3, r3, 32
926 bdnz 1b
927 sync /* Wait for all icbi to complete on bus */
928 isync
929 #if 1
930 /* Unlock the data cache and invalidate it */
931 mfspr r0, HID0
932 li r3,0x1000
933 andc r0,r0,r3
934 li r3,0x0400
935 or r0,r0,r3
936 sync
937 mtspr HID0, r0
938 sync
939 blr
940 #endif
941 #if 0
942 /* Unlock the first way of the data cache */
943 mfspr r0, LDSTCR
944 li r3,0x0080
945 andc r0,r0,r3
946 #ifdef CONFIG_ALTIVEC
947 dssall
948 #endif
949 sync
950 mtspr LDSTCR, r0
951 sync
952 isync
953 li r3,0x0400
954 or r0,r0,r3
955 sync
956 mtspr HID0, r0
957 sync
958 blr
959 #endif
960 #endif
961
962 /* If this is a multi-cpu system then we need to handle the
963 * 2nd cpu. The assumption is that the 2nd cpu is being
964 * held in boot holdoff mode until the 1st cpu unlocks it
965 * from Linux. We'll do some basic cpu init and then pass
966 * it to the Linux Reset Vector.
967 * Sri: Much of this initialization is not required. Linux
968 * rewrites the bats, and the sprs and also enables the L1 cache.
969 */
970 #if (CONFIG_NUM_CPUS > 1)
971 .globl secondary_cpu_setup
972 secondary_cpu_setup:
973 /* Do only core setup on all cores except cpu0 */
974 bl invalidate_bats
975 sync
976 bl enable_ext_addr
977
978 #ifdef CONFIG_SYS_L2
979 /* init the L2 cache */
980 addis r3, r0, L2_INIT@h
981 ori r3, r3, L2_INIT@l
982 sync
983 mtspr l2cr, r3
984 #ifdef CONFIG_ALTIVEC
985 dssall
986 #endif
987 /* invalidate the L2 cache */
988 bl l2cache_invalidate
989 sync
990 #endif
991
992 /* enable and invalidate the data cache */
993 bl dcache_enable
994 sync
995
996 /* enable and invalidate the instruction cache*/
997 bl icache_enable
998 sync
999
1000 /* TBEN in HID0 */
1001 mfspr r4, HID0
1002 oris r4, r4, 0x0400
1003 mtspr HID0, r4
1004 sync
1005 isync
1006
1007 /* MCP|SYNCBE|ABE in HID1 */
1008 mfspr r4, HID1
1009 oris r4, r4, 0x8000
1010 ori r4, r4, 0x0C00
1011 mtspr HID1, r4
1012 sync
1013 isync
1014
1015 lis r3, CONFIG_LINUX_RESET_VEC@h
1016 ori r3, r3, CONFIG_LINUX_RESET_VEC@l
1017 mtlr r3
1018 blr
1019
1020 /* Never Returns, Running in Linux Now */
1021 #endif