/*
 * cpu/mpc5xxx/start.S — U-Boot startup code for MPC5xxx CPUs
 * (snapshot from git.ipfire.org, people/ms/u-boot.git;
 *  includes "Patch by Mark Jonas, 08 June 2004")
 */
1 /*
2 * Copyright (C) 1998 Dan Malek <dmalek@jlc.net>
3 * Copyright (C) 1999 Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se>
4 * Copyright (C) 2000 - 2003 Wolfgang Denk <wd@denx.de>
5 *
6 * See file CREDITS for list of people who contributed to this
7 * project.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
22 * MA 02111-1307 USA
23 */
24
25 /*
26 * U-Boot - Startup Code for MPC5xxx CPUs
27 */
28 #include <config.h>
29 #include <mpc5xxx.h>
30 #include <version.h>
31
#define CONFIG_MPC5xxx	1	/* needed for Linux kernel header files */
#define _LINUX_CONFIG_H 1	/* avoid reading Linux autoconf.h file	*/

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

/* Fall back to an empty board identification string if the board
 * configuration does not provide one.
 */
#ifndef  CONFIG_IDENT_STRING
#define  CONFIG_IDENT_STRING ""
#endif

/* We don't want the MMU yet.
 */
#undef	MSR_KERNEL
/* MSR value used while U-Boot runs:
 * Floating Point enable, Machine Check and Recoverable Interr.
 * With DEBUG, Machine Check (MSR_ME) is deliberately left off so a
 * checkstop condition halts the CPU for the debugger (see the
 * comment at the 0x1300 vector below).
 */
#ifdef DEBUG
#define MSR_KERNEL (MSR_FP|MSR_RI)
#else
#define MSR_KERNEL (MSR_FP|MSR_ME|MSR_RI)
#endif
54
/*
 * Set up GOT: Global Offset Table
 *
 * Use r14 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

	/* symbols needed by trap_init to copy/relocate the vectors */
	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)

	/* symbols needed by relocate_code / clear_bss */
	GOT_ENTRY(__init_end)
	GOT_ENTRY(_end)
	GOT_ENTRY(__bss_start)
	END_GOT
73
/*
 * Version string (NUL-terminated; built from the version header plus
 * build date/time plus the optional board ident string)
 */
	.data
	.globl	version_string
version_string:
	.ascii U_BOOT_VERSION
	.ascii " (", __DATE__, " - ", __TIME__, ")"
	.ascii CONFIG_IDENT_STRING, "\0"
83
/*
 * Exception vectors
 *
 * Reset entry points.  r21 carries the boot flag (cold/warm) until it
 * is handed to board_init_f in r3 at the end of this block.
 */
	.text
	. = EXC_OFF_SYS_RESET
	.globl	_start
_start:
	li	r21, BOOTFLAG_COLD	/* Normal Power-On		*/
	nop
	b	boot_cold

	. = EXC_OFF_SYS_RESET + 0x10

	.globl	_start_warm
_start_warm:
	li	r21, BOOTFLAG_WARM	/* Software reboot		*/
	b	boot_warm

boot_cold:
boot_warm:
	mfmsr	r5			/* save msr contents (used by init_5xxx_core) */

	/* Move CSBoot and adjust instruction pointer                   */
	/*--------------------------------------------------------------*/

#if defined(CFG_LOWBOOT)
#if defined(CFG_RAMBOOT)
#error CFG_LOWBOOT is incompatible with CFG_RAMBOOT
#endif /* CFG_RAMBOOT */
	/* Map the boot chip-select at CS0 so we can jump to the
	 * address space the linker used, then disable CS0 again in
	 * favour of the Boot CS (registers are offsets from MBAR).
	 */
	lis	r4, CFG_DEFAULT_MBAR@h
	lis	r3, START_REG(CFG_BOOTCS_START)@h
	ori	r3, r3, START_REG(CFG_BOOTCS_START)@l
	stw	r3, 0x4(r4)		/* CS0 start			*/
	lis	r3, STOP_REG(CFG_BOOTCS_START, CFG_BOOTCS_SIZE)@h
	ori	r3, r3, STOP_REG(CFG_BOOTCS_START, CFG_BOOTCS_SIZE)@l
	stw	r3, 0x8(r4)		/* CS0 stop			*/
	lis	r3, 0x02010000@h
	ori	r3, r3, 0x02010000@l
	stw	r3, 0x54(r4)		/* CS0 and Boot enable		*/

	lis	r3, lowboot_reentry@h	/* jump from bootlow address space (0x0000xxxx) */
	ori	r3, r3, lowboot_reentry@l /* to the address space the linker used */
	mtlr	r3
	blr

lowboot_reentry:
	lis	r3, START_REG(CFG_BOOTCS_START)@h
	ori	r3, r3, START_REG(CFG_BOOTCS_START)@l
	stw	r3, 0x4c(r4)		/* Boot start			*/
	lis	r3, STOP_REG(CFG_BOOTCS_START, CFG_BOOTCS_SIZE)@h
	ori	r3, r3, STOP_REG(CFG_BOOTCS_START, CFG_BOOTCS_SIZE)@l
	stw	r3, 0x50(r4)		/* Boot stop			*/
	lis	r3, 0x02000001@h
	ori	r3, r3, 0x02000001@l
	stw	r3, 0x54(r4)		/* Boot enable, CS0 disable	*/
#endif	/* CFG_LOWBOOT */

#if defined(CFG_DEFAULT_MBAR) && !defined(CFG_RAMBOOT)
	/* Move the internal register map from the reset-default MBAR
	 * to the configured CFG_MBAR.
	 */
	lis	r3, CFG_MBAR@h
	ori	r3, r3, CFG_MBAR@l
#if defined(CONFIG_MPC5200)
	/* MBAR is mirrored into the MBAR SPR */
	mtspr	MBAR,r3
	rlwinm	r3, r3, 16, 16, 31	/* MBAR register wants base >> 16 */
#endif
#if defined(CONFIG_MGT5100)
	rlwinm	r3, r3, 17, 15, 31	/* MGT5100 uses base >> 15	*/
#endif
	lis	r4, CFG_DEFAULT_MBAR@h
	stw	r3, 0(r4)		/* write via the old MBAR location */
#endif /* CFG_DEFAULT_MBAR */

	/* Initialise the MPC5xxx processor core			*/
	/*--------------------------------------------------------------*/

	bl	init_5xxx_core

	/* initialize some things that are hard to access from C	*/
	/*--------------------------------------------------------------*/

	/* set up stack in on-chip SRAM */
	lis	r3, CFG_INIT_RAM_ADDR@h
	ori	r3, r3, CFG_INIT_RAM_ADDR@l
	ori	r1, r3, CFG_INIT_SP_OFFSET
	li	r0, 0			/* Make room for stack frame header and	*/
	stwu	r0, -4(r1)		/* clear final stack frame so that	*/
	stwu	r0, -4(r1)		/* stack backtraces terminate cleanly	*/

	/* let the C-code set up the rest				*/
	/*								*/
	/* Be careful to keep code relocatable !			*/
	/*--------------------------------------------------------------*/

	GET_GOT			/* initialize GOT access (r14)		*/

	/* r3: IMMR */
	bl	cpu_init_f	/* run low-level CPU init code (in Flash) */

	mr	r3, r21
	/* r3: BOOTFLAG */
	bl	board_init_f	/* run 1st part of board init code (in Flash) */
185
/*
 * Vector Table
 *
 * Each vector lives at its architectural offset (forced with ". =").
 * STD_EXCEPTION plants a stub that dispatches to the named C handler;
 * Alignment and ProgramCheck are open-coded because they must capture
 * extra state (DAR/DSISR) resp. use their own .L_ relocation records.
 */

	.globl	_start_of_vectors
_start_of_vectors:

/* Machine check */
	STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x500, ExtInterrupt, external_interrupt)

/* Alignment exception. */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,DAR			/* faulting data address	*/
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR		/* cause bits			*/
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r20,MSR_KERNEL
	rlwimi	r20,r23,0,16,16		/* copy EE bit from saved MSR	*/
	rlwimi	r20,r23,0,25,25		/* copy IP bit from saved MSR	*/
	lwz	r6,GOT(transfer_to_handler)
	mtlr	r6
	blrl				/* LR now points at .L_Alignment */
.L_Alignment:
	.long	AlignmentException - _start + EXC_OFF_SYS_RESET
	.long	int_return - _start + EXC_OFF_SYS_RESET

/* Program check exception */
	. = 0x700
ProgramCheck:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r20,MSR_KERNEL
	rlwimi	r20,r23,0,16,16		/* copy EE bit from saved MSR	*/
	rlwimi	r20,r23,0,25,25		/* copy IP bit from saved MSR	*/
	lwz	r6,GOT(transfer_to_handler)
	mtlr	r6
	blrl
.L_ProgramCheck:
	.long	ProgramCheckException - _start + EXC_OFF_SYS_RESET
	.long	int_return - _start + EXC_OFF_SYS_RESET

	STD_EXCEPTION(0x800, FPUnavailable, UnknownException)

	/* I guess we could implement decrementer, and may have
	 * to someday for timekeeping.
	 */
	STD_EXCEPTION(0x900, Decrementer, timer_interrupt)

	STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
	STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
	STD_EXCEPTION(0xc00, SystemCall, UnknownException)
	STD_EXCEPTION(0xd00, SingleStep, UnknownException)

	STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
	STD_EXCEPTION(0xf00, Trap_0f, UnknownException)

	STD_EXCEPTION(0x1000, InstructionTLBMiss, UnknownException)
	STD_EXCEPTION(0x1100, DataLoadTLBMiss, UnknownException)
	STD_EXCEPTION(0x1200, DataStoreTLBMiss, UnknownException)
#ifdef DEBUG
	. = 0x1300
	/*
	 * This exception occurs when the program counter matches the
	 * Instruction Address Breakpoint Register (IABR).
	 *
	 * I want the cpu to halt if this occurs so I can hunt around
	 * with the debugger and look at things.
	 *
	 * When DEBUG is defined, both machine check enable (in the MSR)
	 * and checkstop reset enable (in the reset mode register) are
	 * turned off and so a checkstop condition will result in the cpu
	 * halting.
	 *
	 * I force the cpu into a checkstop condition by putting an illegal
	 * instruction here (at least this is the theory).
	 *
	 * well - that didnt work, so just do an infinite loop!
	 */
1:	b	1b
#else
	STD_EXCEPTION(0x1300, InstructionBreakpoint, DebugException)
#endif
	STD_EXCEPTION(0x1400, SMI, UnknownException)

	STD_EXCEPTION(0x1500, Trap_15, UnknownException)
	STD_EXCEPTION(0x1600, Trap_16, UnknownException)
	STD_EXCEPTION(0x1700, Trap_17, UnknownException)
	STD_EXCEPTION(0x1800, Trap_18, UnknownException)
	STD_EXCEPTION(0x1900, Trap_19, UnknownException)
	STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
	STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
	STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
	STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
	STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
	STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
	STD_EXCEPTION(0x2000, Trap_20, UnknownException)
	STD_EXCEPTION(0x2100, Trap_21, UnknownException)
	STD_EXCEPTION(0x2200, Trap_22, UnknownException)
	STD_EXCEPTION(0x2300, Trap_23, UnknownException)
	STD_EXCEPTION(0x2400, Trap_24, UnknownException)
	STD_EXCEPTION(0x2500, Trap_25, UnknownException)
	STD_EXCEPTION(0x2600, Trap_26, UnknownException)
	STD_EXCEPTION(0x2700, Trap_27, UnknownException)
	STD_EXCEPTION(0x2800, Trap_28, UnknownException)
	STD_EXCEPTION(0x2900, Trap_29, UnknownException)
	STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
	STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
	STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
	STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
	STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
	STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)


	.globl	_end_of_vectors
_end_of_vectors:

	. = 0x3000
315
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 * Reached via blrl from a vector stub, so LR points at the two-word
 * (handler, return) record following that stub.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)		/* save interrupted PC		*/
	lis	r22,MSR_POW@h
	andc	r23,r23,r22		/* clear POW in saved MSR	*/
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)		/* complete the GPR save set	*/
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	mflr	r23			/* r23 -> (handler, return) pair */
	andi.	r24,r23,0x3f00		/* get vector offset		*/
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	lwz	r24,0(r23)		/* virtual address of handler	*/
	lwz	r23,4(r23)		/* where to go when done	*/
	mtspr	SRR0,r24
	mtspr	SRR1,r20		/* MSR for the handler (set by stub) */
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU	*/
343
/*
 * Common exception exit: restore the full register state from the
 * exception frame at r1 and rfi back to the interrupted code.
 */
int_return:
	mfmsr	r28		/* Disable interrupts			*/
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this...		*/
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)	/* special registers first		*/
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)	/* then the GPRs			*/
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment			*/
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)	/* r1 last: frees the exception frame	*/
	SYNC
	rfi
373
/*
 * This code initialises the MPC5xxx processor core
 * (conforms to PowerPC 603e spec)
 * Note: expects original MSR contents to be in r5.
 * In:    r5  = MSR value at reset entry (IP/SE/BE bits preserved)
 * Clobb: r0, r3, CTR, MSR, SRR1, HID0, all BATs, TLBs
 */

	/* Fix: export the symbol that is actually defined below.  The
	 * previous ".globl init_5xx_core" (missing an 'x') declared a
	 * name defined nowhere and left init_5xxx_core file-local.
	 */
	.globl	init_5xxx_core
init_5xxx_core:

	/* Initialize machine status; enable machine check interrupt	*/
	/*--------------------------------------------------------------*/

	li	r3, MSR_KERNEL		/* Set ME and RI flags		*/
	rlwimi	r3, r5, 0, 25, 25	/* preserve IP bit set by HRCW	*/
#ifdef DEBUG
	rlwimi	r3, r5, 0, 21, 22	/* debugger might set SE & BE bits */
#endif
	SYNC				/* Some chip revs need this...	*/
	mtmsr	r3
	SYNC
	mtspr	SRR1, r3		/* Make SRR1 match MSR		*/

	/* Initialize the Hardware Implementation-dependent Registers	*/
	/* HID0 also contains cache control				*/
	/*--------------------------------------------------------------*/

	lis	r3, CFG_HID0_INIT@h
	ori	r3, r3, CFG_HID0_INIT@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CFG_HID0_FINAL@h
	ori	r3, r3, CFG_HID0_FINAL@l
	SYNC
	mtspr	HID0, r3

	/* clear all BAT's						*/
	/*--------------------------------------------------------------*/

	li	r0, 0
	mtspr	DBAT0U, r0
	mtspr	DBAT0L, r0
	mtspr	DBAT1U, r0
	mtspr	DBAT1L, r0
	mtspr	DBAT2U, r0
	mtspr	DBAT2L, r0
	mtspr	DBAT3U, r0
	mtspr	DBAT3L, r0
	mtspr	DBAT4U, r0
	mtspr	DBAT4L, r0
	mtspr	DBAT5U, r0
	mtspr	DBAT5L, r0
	mtspr	DBAT6U, r0
	mtspr	DBAT6L, r0
	mtspr	DBAT7U, r0
	mtspr	DBAT7L, r0
	mtspr	IBAT0U, r0
	mtspr	IBAT0L, r0
	mtspr	IBAT1U, r0
	mtspr	IBAT1L, r0
	mtspr	IBAT2U, r0
	mtspr	IBAT2L, r0
	mtspr	IBAT3U, r0
	mtspr	IBAT3L, r0
	mtspr	IBAT4U, r0
	mtspr	IBAT4L, r0
	mtspr	IBAT5U, r0
	mtspr	IBAT5L, r0
	mtspr	IBAT6U, r0
	mtspr	IBAT6L, r0
	mtspr	IBAT7U, r0
	mtspr	IBAT7L, r0
	SYNC

	/* invalidate all tlb's						*/
	/*								*/
	/* From the 603e User Manual: "The 603e provides the ability to	*/
	/* invalidate a TLB entry. The TLB Invalidate Entry (tlbie)	*/
	/* instruction invalidates the TLB entry indexed by the EA, and	*/
	/* operates on both the instruction and data TLBs simultaneously*/
	/* invalidating four TLB entries (both sets in each TLB). The	*/
	/* index corresponds to bits 15-19 of the EA. To invalidate all	*/
	/* entries within both TLBs, 32 tlbie instructions should be	*/
	/* issued, incrementing this field by one each time."		*/
	/*								*/
	/* "Note that the tlbia instruction is not implemented on the	*/
	/* 603e."							*/
	/*								*/
	/* bits 15-19 correspond to addresses 0x00000000 to 0x0001F000	*/
	/* incrementing by 0x1000 each time. The code below is sort of	*/
	/* based on code in "flush_tlbs" from arch/ppc/kernel/head.S	*/
	/*								*/
	/*--------------------------------------------------------------*/

	li	r3, 32
	mtctr	r3
	li	r3, 0
1:	tlbie	r3
	addi	r3, r3, 0x1000
	bdnz	1b
	SYNC

	/* Done! */
	/*--------------------------------------------------------------*/

	blr
480
/* Cache functions.
 *
 * Note: requires that all cache bits in
 * HID0 are in the low half word.
 */

/*
 * Enable the instruction cache: set ICE, pulse ICFI to invalidate,
 * and clear ILOCK.  Clobbers r3, r4.
 */
	.globl	icache_enable
icache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_ICE	/* r3 = HID0 with enable set	*/
	lis	r4, 0
	ori	r4, r4, HID0_ILOCK
	andc	r3, r3, r4		/* ... and lock cleared		*/
	ori	r4, r3, HID0_ICFI	/* r4 = r3 plus flash-invalidate */
	isync
	mtspr	HID0, r4		/* sets enable and invalidate, clears lock */
	isync
	mtspr	HID0, r3		/* clears invalidate		*/
	blr
499
/*
 * Disable the instruction cache: clear ICE and ILOCK, pulsing ICFI
 * to invalidate on the way.  Clobbers r3, r4.
 */
	.globl	icache_disable
icache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_ICE|HID0_ILOCK
	andc	r3, r3, r4		/* r3 = HID0 with enable+lock cleared */
	ori	r4, r3, HID0_ICFI	/* r4 = r3 plus flash-invalidate */
	isync
	mtspr	HID0, r4		/* sets invalidate, clears enable and lock */
	isync
	mtspr	HID0, r3		/* clears invalidate		*/
	blr
512
/*
 * Return 1 in r3 if the instruction cache is enabled, else 0.
 * extrwi is the assembler's extended mnemonic that encodes exactly
 * "rlwinm r3, r3, HID0_ICE_BITPOS + 1, 31, 31" (single-bit extract).
 */
	.globl icache_status
icache_status:
	mfspr	r3, HID0			/* read cache-control register	*/
	extrwi	r3, r3, 1, HID0_ICE_BITPOS	/* isolate the ICE bit -> 0/1	*/
	blr
518
/*
 * Enable the data cache: set DCE, pulse DCI to invalidate, and clear
 * DLOCK.  Uses sync (not isync) around the HID0 writes since data-side
 * accesses must complete first.  Clobbers r3, r4.
 */
	.globl	dcache_enable
dcache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_DCE	/* r3 = HID0 with enable set	*/
	lis	r4, 0
	ori	r4, r4, HID0_DLOCK
	andc	r3, r3, r4		/* ... and lock cleared		*/
	ori	r4, r3, HID0_DCI	/* r4 = r3 plus invalidate	*/
	sync
	mtspr	HID0, r4		/* sets enable and invalidate, clears lock */
	sync
	mtspr	HID0, r3		/* clears invalidate		*/
	blr
532
/*
 * Disable the data cache: clear DCE and DLOCK, pulsing DCI to
 * invalidate on the way.  Clobbers r3, r4.
 */
	.globl	dcache_disable
dcache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_DCE|HID0_DLOCK
	andc	r3, r3, r4		/* r3 = HID0 with enable+lock cleared */
	ori	r4, r3, HID0_DCI	/* r4 = r3 plus invalidate	*/
	sync
	mtspr	HID0, r4		/* sets invalidate, clears enable and lock */
	sync
	mtspr	HID0, r3		/* clears invalidate		*/
	blr
545
/*
 * Return 1 in r3 if the data cache is enabled, else 0.
 * extrwi is the assembler's extended mnemonic that encodes exactly
 * "rlwinm r3, r3, HID0_DCE_BITPOS + 1, 31, 31" (single-bit extract).
 */
	.globl dcache_status
dcache_status:
	mfspr	r3, HID0			/* read cache-control register	*/
	extrwi	r3, r3, 1, HID0_DCE_BITPOS	/* isolate the DCE bit -> 0/1	*/
	blr
551
/* u32 get_svr(void) — return the System Version Register in r3. */
	.globl get_svr
get_svr:
	mfspr	r3, SVR
	blr
556
/* u32 get_pvr(void) — return the Processor Version Register in r3. */
	.globl get_pvr
get_pvr:
	mfspr	r3, PVR
	blr
561
/*------------------------------------------------------------------------------*/

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,  r3		/* Set new stack pointer		*/
	mr	r9,  r4		/* Save copy of Global Data pointer	*/
	mr	r10, r5		/* Save copy of Destination Address	*/

	mr	r3,  r5				/* Destination Address	*/
	lis	r4, CFG_MONITOR_BASE@h		/* Source      Address	*/
	ori	r4, r4, CFG_MONITOR_BASE@l
	lwz	r5, GOT(__init_end)
	sub	r5, r5, r4			/* length = __init_end - base */
	li	r6, CFG_CACHELINE_SIZE		/* Cache Line Size	*/

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CFG_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15, r10, r4

	/* First our own GOT */
	add	r14, r14, r15
	/* then the one used by the C code */
	add	r30, r30, r15

	/*
	 * Now relocate code
	 */
	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2		/* r0 = word count, rounded up		*/
	beq	cr1,4f		/* In place copy is not necessary	*/
	beq	7f		/* Protect against 0 count		*/
	mtctr	r0
	bge	cr1,2f		/* dest > src: copy backwards		*/

	/* forward copy (dest below src)				*/
	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

	/* backward copy (dest above src) to avoid overlap corruption	*/
2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

	/*
	 * Now flush the cache: note that we must start from a cache aligned
	 * address. Otherwise we might miss one cache line.
	 */
4:	cmpwi	r6,0
	add	r5,r3,r5	/* r5 = end of copied region		*/
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0	/* align start down to a cache line	*/
	mfspr	r7,HID0		/* don't do dcbst if dcache is disabled */
	rlwinm	r7,r7,HID0_DCE_BITPOS+1,31,31
	cmpwi	r7,0
	beq	9f
	mr	r4,r3
5:	dcbst	0,r4		/* push copied code out of dcache	*/
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
9:	mfspr	r7,HID0		/* don't do icbi if icache is disabled	*/
	rlwinm	r7,r7,HID0_ICE_BITPOS+1,31,31
	cmpwi	r7,0
	beq	7f
	mr	r4,r3
6:	icbi	0,r4		/* invalidate stale icache lines	*/
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus	*/
	isync

	/*
	 * We are done. Do not return, instead branch to second part of board
	 * initialization, now running from RAM.
	 */
	addi	r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
	mtlr	r0
	blr
667
/*
 * Continuation of relocate_code, now executing at the relocated
 * address (r9 = global data pointer, r10 = destination address,
 * both preserved from relocate_code).
 */
in_ram:

	/*
	 * Relocation Function, r14 point to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11		/* r11 = relocation offset	*/
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	add	r0,r0,r11
	stw	r0,0(r3)
	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
2:	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f			/* skip if the table is empty	*/
3:	lwzu	r4,4(r3)		/* r4 = pointer to a fixup word	*/
	lwzux	r0,r4,r11		/* load it at its relocated place */
	add	r0,r0,r11		/* relocate its value		*/
	stw	r10,0(r3)
	stw	r0,0(r4)
	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(_end)

	cmplw	0, r3, r4
	beq	6f			/* empty BSS: nothing to do	*/

	li	r0, 0
5:
	stw	r0, 0(r3)
	addi	r3, r3, 4
	cmplw	0, r3, r4
	bne	5b
6:

	mr	r3, r9		/* Global Data pointer			*/
	mr	r4, r10		/* Destination Address			*/
	bl	board_init_r	/* continue board init in C (no return) */
725
/*
 * Copy exception vector code to low memory
 *
 * r3: dest_addr
 * r7: source address, r8: end address, r9: target address
 *
 * After copying, the per-vector (hdlr, int_return) address pairs are
 * rebased by dest_addr via trap_reloc, then MSR[IP] is cleared so
 * exceptions vector to the low-memory copy.
 */
	.globl	trap_init
trap_init:
	lwz	r7, GOT(_start)
	lwz	r8, GOT(_end_of_vectors)

	li	r9, 0x100		/* reset vector always at 0x100	*/

	cmplw	0, r7, r8
	bgelr				/* return if r7>=r8 - just in case */

	mflr	r4			/* save link register		*/
1:
	lwz	r0, 0(r7)		/* copy vectors word by word	*/
	stw	r0, 0(r9)
	addi	r7, r7, 4
	addi	r9, r9, 4
	cmplw	0, r7, r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
	li	r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	2b

	li	r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
	li	r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	3b

	li	r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
	li	r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	4b

	mfmsr	r3			/* now that the vectors have	*/
	lis	r7, MSR_IP@h		/* relocated into low memory	*/
	ori	r7, r7, MSR_IP@l	/* MSR[IP] can be turned off	*/
	andc	r3, r3, r7		/* (if it was on)		*/
	SYNC				/* Some chip revs need this...	*/
	mtmsr	r3
	SYNC

	mtlr	r4			/* restore link register	*/
	blr
794
/*
 * Function: relocate entries for one exception vector
 *
 * In:  r7 = address of the (hdlr, int_return) word pair
 *      r3 = dest_addr to add to both words (see trap_init)
 * Clobbers r0.
 */
trap_reloc:
	lwz	r0, 0(r7)		/* hdlr ...			*/
	add	r0, r0, r3		/* ... += dest_addr		*/
	stw	r0, 0(r7)

	lwz	r0, 4(r7)		/* int_return ...		*/
	add	r0, r0, r3		/* ... += dest_addr		*/
	stw	r0, 4(r7)

	blr