/*
 * Copyright (C) 1998	Dan Malek <dmalek@jlc.net>
 * Copyright (C) 1999	Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se>
 * Copyright (C) 2000 - 2003 Wolfgang Denk <wd@denx.de>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/*
 * U-Boot - Startup Code for MPC5xxx CPUs
 */
#include <config.h>
#include <mpc5xxx.h>
#include <version.h>

#define CONFIG_MPC5xxx 1	/* needed for Linux kernel header files */
#define _LINUX_CONFIG_H 1	/* avoid reading Linux autoconf.h file */

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

#ifndef CONFIG_IDENT_STRING
#define CONFIG_IDENT_STRING ""
#endif

/* We don't want the MMU yet.
 */
#undef	MSR_KERNEL
/* Floating Point enable, Machine Check and Recoverable Interr. */
#ifdef DEBUG
#define MSR_KERNEL (MSR_FP|MSR_RI)
#else
#define MSR_KERNEL (MSR_FP|MSR_ME|MSR_RI)
#endif
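/*
 * Note: in the DEBUG case MSR_ME is deliberately left out, so a machine
 * check leads to a checkstop and halts the CPU for the debugger (see the
 * comment at the 0x1300 instruction breakpoint vector below).
 */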

/*
 * Set up GOT: Global Offset Table
 *
 * Use r14 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)

	GOT_ENTRY(__init_end)
	GOT_ENTRY(_end)
	GOT_ENTRY(__bss_start)
	END_GOT

/*
 * Version string
 */
	.data
	.globl	version_string
version_string:
	.ascii U_BOOT_VERSION
	.ascii " (", __DATE__, " - ", __TIME__, ")"
	.ascii CONFIG_IDENT_STRING, "\0"

/*
 * Exception vectors
 */
	.text
	. = EXC_OFF_SYS_RESET
	.globl	_start
_start:
	li	r21, BOOTFLAG_COLD	/* Normal Power-On		*/
	nop
	b	boot_cold

	. = EXC_OFF_SYS_RESET + 0x10

	.globl	_start_warm
_start_warm:
	li	r21, BOOTFLAG_WARM	/* Software reboot		*/
	b	boot_warm

boot_cold:
boot_warm:
	mfmsr	r5			/* save msr contents		*/
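					/* (init_5xxx_core below expects the original MSR in r5) */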

	/* Move CSBoot and adjust instruction pointer			*/
	/*--------------------------------------------------------------*/

#if defined(CONFIG_SYS_LOWBOOT)
# if defined(CONFIG_SYS_RAMBOOT)
#  error CONFIG_SYS_LOWBOOT is incompatible with CONFIG_SYS_RAMBOOT
# endif /* CONFIG_SYS_RAMBOOT */
# if defined(CONFIG_MGT5100)
#  error CONFIG_SYS_LOWBOOT is incompatible with MGT5100
# endif /* CONFIG_MGT5100 */
	lis	r4, CONFIG_SYS_DEFAULT_MBAR@h
	lis	r3, START_REG(CONFIG_SYS_BOOTCS_START)@h
	ori	r3, r3, START_REG(CONFIG_SYS_BOOTCS_START)@l
	stw	r3, 0x4(r4)		/* CS0 start */
	lis	r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@h
	ori	r3, r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@l
	stw	r3, 0x8(r4)		/* CS0 stop */
	lis	r3, 0x02010000@h
	ori	r3, r3, 0x02010000@l
	stw	r3, 0x54(r4)		/* CS0 and Boot enable */

	lis	r3, lowboot_reentry@h	/* jump from bootlow address space (0x0000xxxx) */
	ori	r3, r3, lowboot_reentry@l /* to the address space the linker used */
	mtlr	r3
	blr

lowboot_reentry:
	lis	r3, START_REG(CONFIG_SYS_BOOTCS_START)@h
	ori	r3, r3, START_REG(CONFIG_SYS_BOOTCS_START)@l
	stw	r3, 0x4c(r4)		/* Boot start */
	lis	r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@h
	ori	r3, r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@l
	stw	r3, 0x50(r4)		/* Boot stop */
	lis	r3, 0x02000001@h
	ori	r3, r3, 0x02000001@l
	stw	r3, 0x54(r4)		/* Boot enable, CS0 disable */
#endif	/* CONFIG_SYS_LOWBOOT */

#if defined(CONFIG_SYS_DEFAULT_MBAR) && !defined(CONFIG_SYS_RAMBOOT)
	lis	r3, CONFIG_SYS_MBAR@h
	ori	r3, r3, CONFIG_SYS_MBAR@l
#if defined(CONFIG_MPC5200)
	/* MBAR is mirrored into the MBAR SPR */
	mtspr	MBAR,r3
	rlwinm	r3, r3, 16, 16, 31
#endif
#if defined(CONFIG_MGT5100)
	rlwinm	r3, r3, 17, 15, 31
#endif
	lis	r4, CONFIG_SYS_DEFAULT_MBAR@h
	stw	r3, 0(r4)
#endif /* CONFIG_SYS_DEFAULT_MBAR */
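	/*
	 * (The two rlwinm instructions above are plain right shifts, by 16
	 * bits on MPC5200 and 15 bits on MGT5100; presumably only the upper
	 * address bits of CONFIG_SYS_MBAR are written to the register at the
	 * default MBAR location.)
	 */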

	/* Initialise the MPC5xxx processor core			*/
	/*--------------------------------------------------------------*/

	bl	init_5xxx_core

	/* initialize some things that are hard to access from C	*/
	/*--------------------------------------------------------------*/

	/* set up stack in on-chip SRAM */
	lis	r3, CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3, r3, CONFIG_SYS_INIT_RAM_ADDR@l
	ori	r1, r3, CONFIG_SYS_INIT_SP_OFFSET
	li	r0, 0			/* Make room for stack frame header and */
	stwu	r0, -4(r1)		/* clear final stack frame so that	*/
	stwu	r0, -4(r1)		/* stack backtraces terminate cleanly	*/

	/* let the C-code set up the rest				*/
	/*								*/
	/* Be careful to keep code relocatable !			*/
	/*--------------------------------------------------------------*/

	GET_GOT			/* initialize GOT access		*/

	/* r3: IMMR */
	bl	cpu_init_f	/* run low-level CPU init code (in Flash) */

	mr	r3, r21
	/* r3: BOOTFLAG */
	bl	board_init_f	/* run 1st part of board init code (in Flash) */
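	/*
	 * NOTE: board_init_f() is expected to end by calling relocate_code()
	 * (below), so execution does not fall through into the vector table
	 * that follows.
	 */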

/*
 * Vector Table
 */

	.globl	_start_of_vectors
_start_of_vectors:

/* Machine check */
	STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x500, ExtInterrupt, external_interrupt)

/* Alignment exception. */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r20,MSR_KERNEL
	rlwimi	r20,r23,0,16,16		/* copy EE bit from saved MSR */
	rlwimi	r20,r23,0,25,25		/* copy IP bit from saved MSR */
	lwz	r6,GOT(transfer_to_handler)
	mtlr	r6
	blrl
.L_Alignment:
	.long	AlignmentException - _start + EXC_OFF_SYS_RESET
	.long	int_return - _start + EXC_OFF_SYS_RESET

/* Program check exception */
	. = 0x700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r20,MSR_KERNEL
	rlwimi	r20,r23,0,16,16		/* copy EE bit from saved MSR */
	rlwimi	r20,r23,0,25,25		/* copy IP bit from saved MSR */
	lwz	r6,GOT(transfer_to_handler)
	mtlr	r6
	blrl
.L_ProgramCheck:
	.long	ProgramCheckException - _start + EXC_OFF_SYS_RESET
	.long	int_return - _start + EXC_OFF_SYS_RESET

	STD_EXCEPTION(0x800, FPUnavailable, UnknownException)

	/* I guess we could implement decrementer, and may have
	 * to someday for timekeeping.
	 */
	STD_EXCEPTION(0x900, Decrementer, timer_interrupt)

	STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
	STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
	STD_EXCEPTION(0xc00, SystemCall, UnknownException)
	STD_EXCEPTION(0xd00, SingleStep, UnknownException)

	STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
	STD_EXCEPTION(0xf00, Trap_0f, UnknownException)

	STD_EXCEPTION(0x1000, InstructionTLBMiss, UnknownException)
	STD_EXCEPTION(0x1100, DataLoadTLBMiss, UnknownException)
	STD_EXCEPTION(0x1200, DataStoreTLBMiss, UnknownException)
#ifdef DEBUG
	. = 0x1300
	/*
	 * This exception occurs when the program counter matches the
	 * Instruction Address Breakpoint Register (IABR).
	 *
	 * I want the cpu to halt if this occurs so I can hunt around
	 * with the debugger and look at things.
	 *
	 * When DEBUG is defined, both machine check enable (in the MSR)
	 * and checkstop reset enable (in the reset mode register) are
	 * turned off and so a checkstop condition will result in the cpu
	 * halting.
	 *
	 * I force the cpu into a checkstop condition by putting an illegal
	 * instruction here (at least this is the theory).
	 *
	 * Well - that didn't work, so just do an infinite loop!
	 */
1:	b	1b
#else
	STD_EXCEPTION(0x1300, InstructionBreakpoint, DebugException)
#endif
	STD_EXCEPTION(0x1400, SMI, UnknownException)

	STD_EXCEPTION(0x1500, Trap_15, UnknownException)
	STD_EXCEPTION(0x1600, Trap_16, UnknownException)
	STD_EXCEPTION(0x1700, Trap_17, UnknownException)
	STD_EXCEPTION(0x1800, Trap_18, UnknownException)
	STD_EXCEPTION(0x1900, Trap_19, UnknownException)
	STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
	STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
	STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
	STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
	STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
	STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
	STD_EXCEPTION(0x2000, Trap_20, UnknownException)
	STD_EXCEPTION(0x2100, Trap_21, UnknownException)
	STD_EXCEPTION(0x2200, Trap_22, UnknownException)
	STD_EXCEPTION(0x2300, Trap_23, UnknownException)
	STD_EXCEPTION(0x2400, Trap_24, UnknownException)
	STD_EXCEPTION(0x2500, Trap_25, UnknownException)
	STD_EXCEPTION(0x2600, Trap_26, UnknownException)
	STD_EXCEPTION(0x2700, Trap_27, UnknownException)
	STD_EXCEPTION(0x2800, Trap_28, UnknownException)
	STD_EXCEPTION(0x2900, Trap_29, UnknownException)
	STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
	STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
	STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
	STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
	STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
	STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)


	.globl	_end_of_vectors
_end_of_vectors:

	. = 0x3000

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 */
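/*
 * (r22/r23 hold the SRR0/SRR1 values - interrupted PC and MSR - presumably
 * saved by EXCEPTION_PROLOG, and r20 holds the MSR value the handler will
 * run with; see the Alignment and ProgramCheck entries above.)
 */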
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	lis	r22,MSR_POW@h
	andc	r23,r23,r22
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */

int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi

/*
 * This code initialises the MPC5xxx processor core
 * (conforms to PowerPC 603e spec)
 * Note: expects original MSR contents to be in r5.
 */

	.globl	init_5xxx_core
init_5xxx_core:

	/* Initialize machine status; enable machine check interrupt	*/
	/*--------------------------------------------------------------*/

	li	r3, MSR_KERNEL		/* Set ME and RI flags */
	rlwimi	r3, r5, 0, 25, 25	/* preserve IP bit set by HRCW */
#ifdef DEBUG
	rlwimi	r3, r5, 0, 21, 22	/* debugger might set SE & BE bits */
#endif
	SYNC				/* Some chip revs need this... */
	mtmsr	r3
	SYNC
	mtspr	SRR1, r3		/* Make SRR1 match MSR */

	/* Initialize the Hardware Implementation-dependent Registers	*/
	/* HID0 also contains cache control				*/
	/*--------------------------------------------------------------*/

	lis	r3, CONFIG_SYS_HID0_INIT@h
	ori	r3, r3, CONFIG_SYS_HID0_INIT@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CONFIG_SYS_HID0_FINAL@h
	ori	r3, r3, CONFIG_SYS_HID0_FINAL@l
	SYNC
	mtspr	HID0, r3

	/* clear all BAT's						*/
	/*--------------------------------------------------------------*/

	li	r0, 0
	mtspr	DBAT0U, r0
	mtspr	DBAT0L, r0
	mtspr	DBAT1U, r0
	mtspr	DBAT1L, r0
	mtspr	DBAT2U, r0
	mtspr	DBAT2L, r0
	mtspr	DBAT3U, r0
	mtspr	DBAT3L, r0
	mtspr	DBAT4U, r0
	mtspr	DBAT4L, r0
	mtspr	DBAT5U, r0
	mtspr	DBAT5L, r0
	mtspr	DBAT6U, r0
	mtspr	DBAT6L, r0
	mtspr	DBAT7U, r0
	mtspr	DBAT7L, r0
	mtspr	IBAT0U, r0
	mtspr	IBAT0L, r0
	mtspr	IBAT1U, r0
	mtspr	IBAT1L, r0
	mtspr	IBAT2U, r0
	mtspr	IBAT2L, r0
	mtspr	IBAT3U, r0
	mtspr	IBAT3L, r0
	mtspr	IBAT4U, r0
	mtspr	IBAT4L, r0
	mtspr	IBAT5U, r0
	mtspr	IBAT5L, r0
	mtspr	IBAT6U, r0
	mtspr	IBAT6L, r0
	mtspr	IBAT7U, r0
	mtspr	IBAT7L, r0
	SYNC

	/* invalidate all tlb's						*/
	/*								*/
	/* From the 603e User Manual: "The 603e provides the ability to	*/
	/* invalidate a TLB entry. The TLB Invalidate Entry (tlbie)	*/
	/* instruction invalidates the TLB entry indexed by the EA, and	*/
	/* operates on both the instruction and data TLBs simultaneously*/
	/* invalidating four TLB entries (both sets in each TLB). The	*/
	/* index corresponds to bits 15-19 of the EA. To invalidate all	*/
	/* entries within both TLBs, 32 tlbie instructions should be	*/
	/* issued, incrementing this field by one each time."		*/
	/*								*/
	/* "Note that the tlbia instruction is not implemented on the	*/
	/* 603e."							*/
	/*								*/
	/* bits 15-19 correspond to addresses 0x00000000 to 0x0001F000	*/
	/* incrementing by 0x1000 each time. The code below is sort of	*/
	/* based on code in "flush_tlbs" from arch/ppc/kernel/head.S	*/
	/*								*/
	/*--------------------------------------------------------------*/

	li	r3, 32
	mtctr	r3
	li	r3, 0
1:	tlbie	r3
	addi	r3, r3, 0x1000
	bdnz	1b
	SYNC

	/* Done!							*/
	/*--------------------------------------------------------------*/

	blr

483 | ||
484 | /* Cache functions. | |
485 | * | |
486 | * Note: requires that all cache bits in | |
487 | * HID0 are in the low half word. | |
488 | */ | |
489 | .globl icache_enable | |
490 | icache_enable: | |
491 | mfspr r3, HID0 | |
492 | ori r3, r3, HID0_ICE | |
493 | lis r4, 0 | |
494 | ori r4, r4, HID0_ILOCK | |
495 | andc r3, r3, r4 | |
496 | ori r4, r3, HID0_ICFI | |
497 | isync | |
498 | mtspr HID0, r4 /* sets enable and invalidate, clears lock */ | |
499 | isync | |
500 | mtspr HID0, r3 /* clears invalidate */ | |
501 | blr | |
502 | ||
503 | .globl icache_disable | |
504 | icache_disable: | |
505 | mfspr r3, HID0 | |
506 | lis r4, 0 | |
507 | ori r4, r4, HID0_ICE|HID0_ILOCK | |
508 | andc r3, r3, r4 | |
509 | ori r4, r3, HID0_ICFI | |
510 | isync | |
511 | mtspr HID0, r4 /* sets invalidate, clears enable and lock */ | |
512 | isync | |
513 | mtspr HID0, r3 /* clears invalidate */ | |
514 | blr | |
515 | ||
516 | .globl icache_status | |
517 | icache_status: | |
518 | mfspr r3, HID0 | |
519 | rlwinm r3, r3, HID0_ICE_BITPOS + 1, 31, 31 | |
520 | blr | |
521 | ||
522 | .globl dcache_enable | |
523 | dcache_enable: | |
524 | mfspr r3, HID0 | |
525 | ori r3, r3, HID0_DCE | |
526 | lis r4, 0 | |
527 | ori r4, r4, HID0_DLOCK | |
528 | andc r3, r3, r4 | |
529 | ori r4, r3, HID0_DCI | |
530 | sync | |
531 | mtspr HID0, r4 /* sets enable and invalidate, clears lock */ | |
532 | sync | |
533 | mtspr HID0, r3 /* clears invalidate */ | |
534 | blr | |
535 | ||
536 | .globl dcache_disable | |
537 | dcache_disable: | |
538 | mfspr r3, HID0 | |
539 | lis r4, 0 | |
540 | ori r4, r4, HID0_DCE|HID0_DLOCK | |
541 | andc r3, r3, r4 | |
542 | ori r4, r3, HID0_DCI | |
543 | sync | |
544 | mtspr HID0, r4 /* sets invalidate, clears enable and lock */ | |
545 | sync | |
546 | mtspr HID0, r3 /* clears invalidate */ | |
547 | blr | |
548 | ||
549 | .globl dcache_status | |
550 | dcache_status: | |
551 | mfspr r3, HID0 | |
552 | rlwinm r3, r3, HID0_DCE_BITPOS + 1, 31, 31 | |
553 | blr | |
554 | ||
	.globl	get_svr
get_svr:
	mfspr	r3, SVR
	blr

	.globl	get_pvr
get_pvr:
	mfspr	r3, PVR
	blr

/*------------------------------------------------------------------------------*/

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
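/*
 * (On entry r3 = addr_sp, r4 = gd and r5 = addr_moni; the r3..r6 values
 * listed above are set up by the first few instructions below, before the
 * copy loop runs.)
 */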
	.globl	relocate_code
relocate_code:
	mr	r1,  r3		/* Set new stack pointer		*/
	mr	r9,  r4		/* Save copy of Global Data pointer	*/
	mr	r10, r5		/* Save copy of Destination Address	*/

	mr	r3,  r5				/* Destination Address	*/
	lis	r4, CONFIG_SYS_MONITOR_BASE@h	/* Source      Address	*/
	ori	r4, r4, CONFIG_SYS_MONITOR_BASE@l
	lwz	r5, GOT(__init_end)
	sub	r5, r5, r4
	li	r6, CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size	*/

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15, r10, r4

	/* First our own GOT */
	add	r14, r14, r15
	/* then the one used by the C code */
	add	r30, r30, r15

	/*
	 * Now relocate code
	 */

	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2
	beq	cr1,4f		/* In place copy is not necessary	*/
	beq	7f		/* Protect against 0 count		*/
	mtctr	r0
	bge	cr1,2f

	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

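	/*
	 * (The loop at 1: copies forwards when the destination is below the
	 * source, the loop at 3: copies backwards otherwise, so overlapping
	 * source and destination regions are handled correctly.)
	 */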
	/*
	 * Now flush the cache: note that we must start from a cache aligned
	 * address. Otherwise we might miss one cache line.
	 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0
	mfspr	r7,HID0		/* don't do dcbst if dcache is disabled */
	rlwinm	r7,r7,HID0_DCE_BITPOS+1,31,31
	cmpwi	r7,0
	beq	9f
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
9:	mfspr	r7,HID0		/* don't do icbi if icache is disabled */
	rlwinm	r7,r7,HID0_ICE_BITPOS+1,31,31
	cmpwi	r7,0
	beq	7f
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus	*/
	isync

	/*
	 * We are done. Do not return, instead branch to second part of board
	 * initialization, now running from RAM.
	 */

	addi	r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
	mtlr	r0
	blr

in_ram:

	/*
	 * Relocation Function, r14 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	add	r0,r0,r11
	stw	r0,0(r3)
	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
2:	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	add	r0,r0,r11
	stw	r10,0(r3)
	stw	r0,0(r4)
	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(_end)

	cmplw	0, r3, r4
	beq	6f

	li	r0, 0
5:
	stw	r0, 0(r3)
	addi	r3, r3, 4
	cmplw	0, r3, r4
	bne	5b
6:

	mr	r3, r9		/* Global Data pointer		*/
	mr	r4, r10		/* Destination Address		*/
	bl	board_init_r

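	/*
	 * (board_init_r() is expected to enter the main loop and never
	 * return here.)
	 */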
/*
 * Copy exception vector code to low memory
 *
 * r3: dest_addr
 * r7: source address, r8: end address, r9: target address
 */
	.globl	trap_init
trap_init:
	lwz	r7, GOT(_start)
	lwz	r8, GOT(_end_of_vectors)

	li	r9, 0x100		/* reset vector always at 0x100 */

	cmplw	0, r7, r8
	bgelr				/* return if r7>=r8 - just in case */

	mflr	r4			/* save link register		*/
1:
	lwz	r0, 0(r7)
	stw	r0, 0(r9)
	addi	r7, r7, 4
	addi	r9, r9, 4
	cmplw	0, r7, r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
	li	r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	2b

	li	r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
	li	r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	3b

	li	r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
	li	r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	4b

	mfmsr	r3			/* now that the vectors have	*/
	lis	r7, MSR_IP@h		/* relocated into low memory	*/
	ori	r7, r7, MSR_IP@l	/* MSR[IP] can be turned off	*/
	andc	r3, r3, r7		/* (if it was on)		*/
	SYNC				/* Some chip revs need this...	*/
	mtmsr	r3
	SYNC

	mtlr	r4			/* restore link register	*/
	blr

/*
 * Function: relocate entries for one exception vector
 */
trap_reloc:
	lwz	r0, 0(r7)		/* hdlr ...			*/
	add	r0, r0, r3		/* ... += dest_addr		*/
	stw	r0, 0(r7)

	lwz	r0, 4(r7)		/* int_return ...		*/
	add	r0, r0, r3		/* ... += dest_addr		*/
	stw	r0, 4(r7)

	blr