/*
 * Source: git.ipfire.org Git - people/ms/u-boot.git
 * File:   arch/powerpc/cpu/mpc5xxx/start.S
 * Commit: "PowerPC: Add support for -msingle-pic-base"
 */
1 /*
2 * Copyright (C) 1998 Dan Malek <dmalek@jlc.net>
3 * Copyright (C) 1999 Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se>
4 * Copyright (C) 2000 - 2003 Wolfgang Denk <wd@denx.de>
5 *
6 * See file CREDITS for list of people who contributed to this
7 * project.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
22 * MA 02111-1307 USA
23 */
24
25 /*
26 * U-Boot - Startup Code for MPC5xxx CPUs
27 */
28 #include <asm-offsets.h>
29 #include <config.h>
30 #include <mpc5xxx.h>
31 #include <timestamp.h>
32 #include <version.h>
33
34 #define CONFIG_MPC5xxx 1 /* needed for Linux kernel header files */
35 #define _LINUX_CONFIG_H 1 /* avoid reading Linux autoconf.h file */
36
37 #include <ppc_asm.tmpl>
38 #include <ppc_defs.h>
39
40 #include <asm/cache.h>
41 #include <asm/mmu.h>
42 #include <asm/u-boot.h>
43
44 #ifndef CONFIG_IDENT_STRING
45 #define CONFIG_IDENT_STRING ""
46 #endif
47
/* We don't want the MMU yet.
 */
#undef	MSR_KERNEL
/* Floating Point enable, Machine Check and Recoverable Interr. */
/*
 * In a DEBUG build MSR[ME] is deliberately left clear so that a machine
 * check results in a checkstop (CPU halt) that a debugger can inspect,
 * instead of being taken as an exception.
 */
#ifdef DEBUG
#define MSR_KERNEL (MSR_FP|MSR_RI)
#else
#define MSR_KERNEL (MSR_FP|MSR_ME|MSR_RI)
#endif
57
/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT.
 *
 * Entries listed here are patched up by the relocation code in in_ram:
 * the first two locate the relocation tables themselves; the rest are
 * symbols the startup/relocation code needs position-independent access to.
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)

	GOT_ENTRY(__init_end)
	GOT_ENTRY(__bss_end__)
	GOT_ENTRY(__bss_start)
	END_GOT
76
/*
 * Version string
 *
 * NUL-terminated identification string placed in .data; the three
 * .ascii directives concatenate into one string, e.g.
 * "U-Boot <ver> (<date> - <time>)<ident>\0".
 */
	.data
	.globl	version_string
version_string:
	.ascii U_BOOT_VERSION
	.ascii " (", U_BOOT_DATE, " - ", U_BOOT_TIME, ")"
	.ascii CONFIG_IDENT_STRING, "\0"
86
/*
 * Exception vectors
 *
 * _start sits at the system-reset vector (EXC_OFF_SYS_RESET) and is the
 * very first code executed out of boot flash.  It runs with no stack and
 * no RAM until it sets both up itself.  Register r5 carries the original
 * MSR contents into init_5xxx_core.
 */
	.text
	. = EXC_OFF_SYS_RESET
	.globl	_start
_start:
	mfmsr	r5			/* save msr contents (consumed by init_5xxx_core) */

	/* Move CSBoot and adjust instruction pointer                   */
	/*--------------------------------------------------------------*/

#if defined(CONFIG_SYS_LOWBOOT)
# if defined(CONFIG_SYS_RAMBOOT)
#  error CONFIG_SYS_LOWBOOT is incompatible with CONFIG_SYS_RAMBOOT
# endif /* CONFIG_SYS_RAMBOOT */
	/* Program CS0 to cover the boot chip select window, then jump	*/
	/* out of the transient boot mapping into link-time addresses.	*/
	lis	r4, CONFIG_SYS_DEFAULT_MBAR@h
	lis	r3, START_REG(CONFIG_SYS_BOOTCS_START)@h
	ori	r3, r3, START_REG(CONFIG_SYS_BOOTCS_START)@l
	stw	r3, 0x4(r4)		/* CS0 start */
	lis	r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@h
	ori	r3, r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@l
	stw	r3, 0x8(r4)		/* CS0 stop */
	lis	r3, 0x02010000@h
	ori	r3, r3, 0x02010000@l
	stw	r3, 0x54(r4)		/* CS0 and Boot enable */

	lis	r3, lowboot_reentry@h	/* jump from bootlow address space (0x0000xxxx) */
	ori	r3, r3, lowboot_reentry@l /* to the address space the linker used */
	mtlr	r3
	blr				/* absolute jump via LR - not a return */

lowboot_reentry:
	/* Now running at link-time addresses: hand the window back to	*/
	/* the Boot chip select and disable CS0.			*/
	lis	r3, START_REG(CONFIG_SYS_BOOTCS_START)@h
	ori	r3, r3, START_REG(CONFIG_SYS_BOOTCS_START)@l
	stw	r3, 0x4c(r4)		/* Boot start */
	lis	r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@h
	ori	r3, r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@l
	stw	r3, 0x50(r4)		/* Boot stop */
	lis	r3, 0x02000001@h
	ori	r3, r3, 0x02000001@l
	stw	r3, 0x54(r4)		/* Boot enable, CS0 disable */
#endif	/* CONFIG_SYS_LOWBOOT */

#if defined(CONFIG_SYS_DEFAULT_MBAR) && !defined(CONFIG_SYS_RAMBOOT)
	/* Move the internal register map from its reset-default base	*/
	/* to the configured CONFIG_SYS_MBAR address.			*/
	lis	r3, CONFIG_SYS_MBAR@h
	ori	r3, r3, CONFIG_SYS_MBAR@l
	/* MBAR is mirrored into the MBAR SPR */
	mtspr	MBAR,r3
	rlwinm	r3, r3, 16, 16, 31	/* MBAR register wants base >> 16 */
	lis	r4, CONFIG_SYS_DEFAULT_MBAR@h
	stw	r3, 0(r4)
#endif /* CONFIG_SYS_DEFAULT_MBAR */

	/* Initialise the MPC5xxx processor core			*/
	/*--------------------------------------------------------------*/

	bl	init_5xxx_core

	/* initialize some things that are hard to access from C	*/
	/*--------------------------------------------------------------*/

	/* set up stack in on-chip SRAM */
	lis	r3, CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3, r3, CONFIG_SYS_INIT_RAM_ADDR@l
	ori	r1, r3, CONFIG_SYS_INIT_SP_OFFSET
	li	r0, 0			/* Make room for stack frame header and	*/
	stwu	r0, -4(r1)		/* clear final stack frame so that	*/
	stwu	r0, -4(r1)		/* stack backtraces terminate cleanly	*/

	/* let the C-code set up the rest				*/
	/*								*/
	/* Be careful to keep code relocatable !			*/
	/*--------------------------------------------------------------*/

	GET_GOT			/* initialize GOT access (r12)		*/
#if defined(__pic__) && __pic__ == 1
	/* Needed for upcoming -msingle-pic-base */
	bl	_GLOBAL_OFFSET_TABLE_@local-4	/* LR = GOT address...	*/
	mflr	r30				/* ...into the PIC base reg */
#endif
	/* r3: IMMR */
	bl	cpu_init_f	/* run low-level CPU init code (in Flash)*/

	bl	board_init_f	/* run 1st part of board init code (in Flash)*/

	/* NOTREACHED - board_init_f() does not return */
174
/*
 * Vector Table
 *
 * Each STD_EXCEPTION() expands to a complete 0x100-byte-aligned handler
 * stub; the `. = 0xNNN` location counters pin each vector at its
 * architected offset.  trap_init later copies this whole region to low
 * memory so MSR[IP] can be cleared.
 */

	.globl	_start_of_vectors
_start_of_vectors:

/* Machine check */
	STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x500, ExtInterrupt, external_interrupt)

/* Alignment exception. */
	/* Open-coded (not STD_EXCEPTION) because DAR/DSISR must also be
	 * captured into the trap frame for the C handler. */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
	. = 0x700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	STD_EXCEPTION(0x800, FPUnavailable, UnknownException)

	/* I guess we could implement decrementer, and may have
	 * to someday for timekeeping.
	 */
	STD_EXCEPTION(0x900, Decrementer, timer_interrupt)

	STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
	STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
	STD_EXCEPTION(0xc00, SystemCall, UnknownException)
	STD_EXCEPTION(0xd00, SingleStep, UnknownException)

	STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
	STD_EXCEPTION(0xf00, Trap_0f, UnknownException)

	STD_EXCEPTION(0x1000, InstructionTLBMiss, UnknownException)
	STD_EXCEPTION(0x1100, DataLoadTLBMiss, UnknownException)
	STD_EXCEPTION(0x1200, DataStoreTLBMiss, UnknownException)
#ifdef DEBUG
	. = 0x1300
	/*
	 * This exception occurs when the program counter matches the
	 * Instruction Address Breakpoint Register (IABR).
	 *
	 * I want the cpu to halt if this occurs so I can hunt around
	 * with the debugger and look at things.
	 *
	 * When DEBUG is defined, both machine check enable (in the MSR)
	 * and checkstop reset enable (in the reset mode register) are
	 * turned off and so a checkstop condition will result in the cpu
	 * halting.
	 *
	 * I force the cpu into a checkstop condition by putting an illegal
	 * instruction here (at least this is the theory).
	 *
	 * well - that didnt work, so just do an infinite loop!
	 */
1:	b	1b
#else
	STD_EXCEPTION(0x1300, InstructionBreakpoint, DebugException)
#endif
	STD_EXCEPTION(0x1400, SMI, UnknownException)

	STD_EXCEPTION(0x1500, Trap_15, UnknownException)
	STD_EXCEPTION(0x1600, Trap_16, UnknownException)
	STD_EXCEPTION(0x1700, Trap_17, UnknownException)
	STD_EXCEPTION(0x1800, Trap_18, UnknownException)
	STD_EXCEPTION(0x1900, Trap_19, UnknownException)
	STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
	STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
	STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
	STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
	STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
	STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
	STD_EXCEPTION(0x2000, Trap_20, UnknownException)
	STD_EXCEPTION(0x2100, Trap_21, UnknownException)
	STD_EXCEPTION(0x2200, Trap_22, UnknownException)
	STD_EXCEPTION(0x2300, Trap_23, UnknownException)
	STD_EXCEPTION(0x2400, Trap_24, UnknownException)
	STD_EXCEPTION(0x2500, Trap_25, UnknownException)
	STD_EXCEPTION(0x2600, Trap_26, UnknownException)
	STD_EXCEPTION(0x2700, Trap_27, UnknownException)
	STD_EXCEPTION(0x2800, Trap_28, UnknownException)
	STD_EXCEPTION(0x2900, Trap_29, UnknownException)
	STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
	STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
	STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
	STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
	STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
	STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)


	.globl	_end_of_vectors
_end_of_vectors:

	. = 0x3000
289
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 *
 * On entry (set up by EXCEPTION_PROLOG / EXC_XFER_TEMPLATE):
 *   r20 = saved MSR for the handler, r22 = faulting PC (SRR0 copy),
 *   r23 = faulting MSR (SRR1 copy), LR -> two-word descriptor
 *   {handler address, return address}.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)		/* record faulting PC in the frame */
	lis	r22,MSR_POW@h
	andc	r23,r23,r22		/* strip power-management bit from saved MSR */
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)		/* complete the GPR save begun in the prolog */
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */
317
/*
 * int_return - common exception exit path.
 *
 * Disables external interrupts, then restores the complete register
 * state from the trap frame (r1) and returns to the interrupted
 * context via rfi.  The frame layout mirrors the one built by
 * EXCEPTION_PROLOG / transfer_to_handler.
 */
int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)	/* restore r1 last - frame is gone after this */
	SYNC
	rfi
347
/*
 * init_5xxx_core - initialise the MPC5xxx processor core
 * (conforms to PowerPC 603e spec)
 *
 * In:       r5 = original MSR contents (saved by _start)
 * Does:     sets up MSR/SRR1, programs HID0, clears all BATs,
 *           invalidates all 32 TLB congruence classes.
 * Clobbers: r0, r3, CTR, CR0.
 *
 * Fix: the .globl directive previously named "init_5xx_core" (one 'x'
 * short), which did not match this label nor the "bl init_5xxx_core"
 * call site, so the intended symbol was never exported and a bogus
 * undefined global was declared instead.
 */

	.globl	init_5xxx_core
init_5xxx_core:

	/* Initialize machine status; enable machine check interrupt	*/
	/*--------------------------------------------------------------*/

	li	r3, MSR_KERNEL		/* Set ME and RI flags */
	rlwimi	r3, r5, 0, 25, 25	/* preserve IP bit set by HRCW */
#ifdef DEBUG
	rlwimi	r3, r5, 0, 21, 22	/* debugger might set SE & BE bits */
#endif
	SYNC				/* Some chip revs need this... */
	mtmsr	r3
	SYNC
	mtspr	SRR1, r3		/* Make SRR1 match MSR */

	/* Initialize the Hardware Implementation-dependent Registers	*/
	/* HID0 also contains cache control				*/
	/*--------------------------------------------------------------*/

	lis	r3, CONFIG_SYS_HID0_INIT@h
	ori	r3, r3, CONFIG_SYS_HID0_INIT@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CONFIG_SYS_HID0_FINAL@h
	ori	r3, r3, CONFIG_SYS_HID0_FINAL@l
	SYNC
	mtspr	HID0, r3

	/* clear all BAT's						*/
	/*--------------------------------------------------------------*/

	li	r0, 0
	mtspr	DBAT0U, r0
	mtspr	DBAT0L, r0
	mtspr	DBAT1U, r0
	mtspr	DBAT1L, r0
	mtspr	DBAT2U, r0
	mtspr	DBAT2L, r0
	mtspr	DBAT3U, r0
	mtspr	DBAT3L, r0
	mtspr	DBAT4U, r0
	mtspr	DBAT4L, r0
	mtspr	DBAT5U, r0
	mtspr	DBAT5L, r0
	mtspr	DBAT6U, r0
	mtspr	DBAT6L, r0
	mtspr	DBAT7U, r0
	mtspr	DBAT7L, r0
	mtspr	IBAT0U, r0
	mtspr	IBAT0L, r0
	mtspr	IBAT1U, r0
	mtspr	IBAT1L, r0
	mtspr	IBAT2U, r0
	mtspr	IBAT2L, r0
	mtspr	IBAT3U, r0
	mtspr	IBAT3L, r0
	mtspr	IBAT4U, r0
	mtspr	IBAT4L, r0
	mtspr	IBAT5U, r0
	mtspr	IBAT5L, r0
	mtspr	IBAT6U, r0
	mtspr	IBAT6L, r0
	mtspr	IBAT7U, r0
	mtspr	IBAT7L, r0
	SYNC

	/* invalidate all tlb's						*/
	/*								*/
	/* From the 603e User Manual: "The 603e provides the ability to	*/
	/* invalidate a TLB entry. The TLB Invalidate Entry (tlbie)	*/
	/* instruction invalidates the TLB entry indexed by the EA, and	*/
	/* operates on both the instruction and data TLBs simultaneously*/
	/* invalidating four TLB entries (both sets in each TLB). The	*/
	/* index corresponds to bits 15-19 of the EA. To invalidate all	*/
	/* entries within both TLBs, 32 tlbie instructions should be	*/
	/* issued, incrementing this field by one each time."		*/
	/*								*/
	/* "Note that the tlbia instruction is not implemented on the	*/
	/* 603e."							*/
	/*								*/
	/* bits 15-19 correspond to addresses 0x00000000 to 0x0001F000	*/
	/* incrementing by 0x1000 each time. The code below is sort of	*/
	/* based on code in "flush_tlbs" from arch/powerpc/kernel/head.S*/
	/*								*/
	/*--------------------------------------------------------------*/

	li	r3, 32
	mtctr	r3
	li	r3, 0
1:	tlbie	r3
	addi	r3, r3, 0x1000
	bdnz	1b
	SYNC

	/* Done!							*/
	/*--------------------------------------------------------------*/

	blr
454
/* Cache functions.
 *
 * Note: requires that all cache bits in
 * HID0 are in the low half word.
 */

/*
 * icache_enable - enable the instruction cache.
 * Writes HID0 twice: first with ICE|ICFI (enable + flash invalidate,
 * lock cleared), then with ICE only, so the invalidate bit does not
 * stay set.  Clobbers r3, r4.
 */
	.globl	icache_enable
icache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_ICE
	lis	r4, 0
	ori	r4, r4, HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4		/* sets enable and invalidate, clears lock */
	isync
	mtspr	HID0, r3		/* clears invalidate */
	blr
473
/*
 * icache_disable - disable the instruction cache.
 * Clears ICE and ILOCK, pulsing ICFI to invalidate the contents on the
 * way out.  Clobbers r3, r4.
 */
	.globl	icache_disable
icache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_ICE|HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4		/* sets invalidate, clears enable and lock */
	isync
	mtspr	HID0, r3		/* clears invalidate */
	blr
486
/*
 * icache_status - return the ICE bit of HID0 in r3 (1 = enabled).
 */
	.globl	icache_status
icache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, HID0_ICE_BITPOS + 1, 31, 31	/* isolate ICE into bit 0 */
	blr
492
/*
 * dcache_enable - enable the data cache.
 * Same two-step HID0 sequence as icache_enable, using DCE/DLOCK/DCI and
 * sync instead of isync.  Clobbers r3, r4.
 */
	.globl	dcache_enable
dcache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_DCE
	lis	r4, 0
	ori	r4, r4, HID0_DLOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_DCI
	sync
	mtspr	HID0, r4		/* sets enable and invalidate, clears lock */
	sync
	mtspr	HID0, r3		/* clears invalidate */
	blr
506
/*
 * dcache_disable - disable the data cache.
 * Clears DCE and DLOCK, pulsing DCI to invalidate the contents.
 * Clobbers r3, r4.
 */
	.globl	dcache_disable
dcache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_DCE|HID0_DLOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_DCI
	sync
	mtspr	HID0, r4		/* sets invalidate, clears enable and lock */
	sync
	mtspr	HID0, r3		/* clears invalidate */
	blr
519
/*
 * dcache_status - return the DCE bit of HID0 in r3 (1 = enabled).
 */
	.globl	dcache_status
dcache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, HID0_DCE_BITPOS + 1, 31, 31	/* isolate DCE into bit 0 */
	blr
525
/*
 * get_svr - return the System Version Register in r3.
 */
	.globl	get_svr
get_svr:
	mfspr	r3, SVR
	blr
530
/*
 * get_pvr - return the Processor Version Register in r3.
 */
	.globl	get_pvr
get_pvr:
	mfspr	r3, PVR
	blr
535
/*------------------------------------------------------------------------------*/

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * Arguments on entry (PPC ABI):
 *	r3 = addr_sp	- new stack pointer in RAM
 *	r4 = gd		- Global Data pointer
 *	r5 = addr_moni	- destination address for the monitor
 *
 * Registers used by the copy/flush loops further down:
 *	r3 = dest
 *	r4 = src
 *	r5 = length in bytes
 *	r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,  r3		/* Set new stack pointer		*/
	mr	r9,  r4		/* Save copy of Global Data pointer	*/
	mr	r10, r5		/* Save copy of Destination Address	*/

	GET_GOT
#if defined(__pic__) && __pic__ == 1
	/* Needed for upcoming -msingle-pic-base */
	bl	_GLOBAL_OFFSET_TABLE_@local-4
	mflr	r30
#endif
	mr	r3,  r5				/* Destination Address	*/
	lis	r4, CONFIG_SYS_MONITOR_BASE@h	/* Source      Address	*/
	ori	r4, r4, CONFIG_SYS_MONITOR_BASE@l
	lwz	r5, GOT(__init_end)
	sub	r5, r5, r4			/* length = __init_end - base */
	li	r6, CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size	*/

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15, r10, r4

	/* First our own GOT */
	add	r12, r12, r15
	/* then the one used by the C code */
	add	r30, r30, r15

	/*
	 * Now relocate code
	 */

	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2			/* word count, rounded up	*/
	beq	cr1,4f			/* In place copy is not necessary */
	beq	7f			/* Protect against 0 count	*/
	mtctr	r0
	bge	cr1,2f			/* dest >= src: copy backwards	*/

	/* forward copy (dest below src, regions may overlap)		*/
	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

	/* backward copy (dest above src, regions may overlap)		*/
2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

	/*
	 * Now flush the cache: note that we must start from a cache aligned
	 * address. Otherwise we might miss one cache line.
	 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f			/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0		/* round dest down to a cache line */
	mfspr	r7,HID0			/* don't do dcbst if dcache is disabled */
	rlwinm	r7,r7,HID0_DCE_BITPOS+1,31,31
	cmpwi	r7,0
	beq	9f
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync				/* Wait for all dcbst to complete on bus */
9:	mfspr	r7,HID0			/* don't do icbi if icache is disabled */
	rlwinm	r7,r7,HID0_ICE_BITPOS+1,31,31
	cmpwi	r7,0
	beq	7f
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync				/* Wait for all icbi to complete on bus	*/
	isync

	/*
	 * We are done. Do not return, instead branch to second part of board
	 * initialization, now running from RAM.
	 */

	addi	r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
	mtlr	r0
	blr				/* jump into the relocated copy */
647
/*
 * in_ram - executed from the relocated copy in RAM.
 * Patches the GOT2 and fixup tables by the relocation offset, clears
 * BSS, then tail-calls board_init_r(gd, dest_addr).
 */
in_ram:

	/*
	 * Relocation Function, r12 point to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11		/* r11 = relocation offset */
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0			/* skip NULL entries */
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)		/* r4 = address holding a fixup word */
	lwzux	r0,r4,r11		/* r0 = *(r4 + offset) */
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)		/* NOTE(review): stores r4 back unchanged;
					 * later U-Boot relocates this pointer too
					 * (add r4,r4,r11) - confirm intended here */
	beq-	5f
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end__)

	cmplw	0, r3, r4		/* empty BSS? */
	beq	6f

	li	r0, 0
5:
	stw	r0, 0(r3)
	addi	r3, r3, 4
	cmplw	0, r3, r4
	bne	5b
6:

	mr	r3, r9		/* Global Data pointer		*/
	mr	r4, r10		/* Destination Address		*/
	bl	board_init_r
709
/*
 * Copy exception vector code to low memory
 *
 * Copies the vector region [_start, _end_of_vectors) down to 0x100,
 * relocates each handler's {hdlr, int_return} descriptor via trap_reloc,
 * then clears MSR[IP] so exceptions are taken from low memory.
 *
 * r3: dest_addr
 * r7: source address, r8: end address, r9: target address
 * (r4 holds the saved link register across the trap_reloc calls)
 */
	.globl	trap_init
trap_init:
	mflr	r4			/* save link register		*/
	GET_GOT
	lwz	r7, GOT(_start)
	lwz	r8, GOT(_end_of_vectors)

	li	r9, 0x100		/* reset vector always at 0x100 */

	cmplw	0, r7, r8
	bgelr				/* return if r7>=r8 - just in case */
1:
	lwz	r0, 0(r7)		/* word-by-word copy loop	*/
	stw	r0, 0(r9)
	addi	r7, r7, 4
	addi	r9, r9, 4
	cmplw	0, r7, r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 * (ranges below follow the vector layout: 0x200..0x500,
	 * then the open-coded Alignment/ProgramCheck vectors,
	 * then 0x800..0xb00, then 0xd00.._end_of_vectors)
	 */
	li	r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
	li	r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	2b

	li	r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
	li	r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	3b

	li	r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
	li	r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	4b

	mfmsr	r3			/* now that the vectors have	*/
	lis	r7, MSR_IP@h		/* relocated into low memory	*/
	ori	r7, r7, MSR_IP@l	/* MSR[IP] can be turned off	*/
	andc	r3, r3, r7		/* (if it was on)		*/
	SYNC				/* Some chip revs need this...	*/
	mtmsr	r3
	SYNC

	mtlr	r4			/* restore link register	*/
	blr