]> git.ipfire.org Git - people/ms/u-boot.git/blob - cpu/mpc86xx/start.S
Merge with /home/wd/git/u-boot/custodian/u-boot-mpc85xx
[people/ms/u-boot.git] / cpu / mpc86xx / start.S
1 /*
2 * Copyright 2004 Freescale Semiconductor.
3 * Srikanth Srinivasan <srikanth.srinivaan@freescale.com>
4 *
5 * See file CREDITS for list of people who contributed to this
6 * project.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24 /* U-Boot - Startup Code for 86xx PowerPC based Embedded Boards
25 *
26 *
27 * The processor starts at 0xfff00100 and the code is executed
28 * from flash. The code is organized to run at a different address
29 * in memory, but that is fine as long as we don't jump around before relocating.
30 * board_init lies at quite a high address and when the cpu has
31 * jumped there, everything is ok.
32 */
33 #include <config.h>
34 #include <mpc86xx.h>
35 #include <version.h>
36
37 #include <ppc_asm.tmpl>
38 #include <ppc_defs.h>
39
40 #include <asm/cache.h>
41 #include <asm/mmu.h>
42
43 #ifndef CONFIG_IDENT_STRING
44 #define CONFIG_IDENT_STRING ""
45 #endif
46
47 /* We don't want the MMU yet.
48 */
49 #undef MSR_KERNEL
50 /* Machine Check and Recoverable Interr. */
51 #define MSR_KERNEL ( MSR_ME | MSR_RI )
52
53 /*
54 * Set up GOT: Global Offset Table
55 *
56 * Use r14 to access the GOT
57 */
58 START_GOT
59 GOT_ENTRY(_GOT2_TABLE_)
60 GOT_ENTRY(_FIXUP_TABLE_)
61
62 GOT_ENTRY(_start)
63 GOT_ENTRY(_start_of_vectors)
64 GOT_ENTRY(_end_of_vectors)
65 GOT_ENTRY(transfer_to_handler)
66
67 GOT_ENTRY(__init_end)
68 GOT_ENTRY(_end)
69 GOT_ENTRY(__bss_start)
70 END_GOT
71
72 /*
73 * r3 - 1st arg to board_init(): IMMP pointer
74 * r4 - 2nd arg to board_init(): boot flag
75 */
76 .text
77 .long 0x27051956 /* U-Boot Magic Number */
78 .globl version_string
79 version_string:
80 .ascii U_BOOT_VERSION
81 .ascii " (", __DATE__, " - ", __TIME__, ")"
82 .ascii CONFIG_IDENT_STRING, "\0"
83
84 . = EXC_OFF_SYS_RESET
85 .globl _start
86 _start:
87 li r21, BOOTFLAG_COLD /* Normal Power-On: Boot from FLASH */
88 b boot_cold
89 sync
90
91 . = EXC_OFF_SYS_RESET + 0x10
92
93 .globl _start_warm
94 _start_warm:
95 li r21, BOOTFLAG_WARM /* Software reboot */
96 b boot_warm
97 sync
98
99 /* the boot code is located below the exception table */
100
101 .globl _start_of_vectors
102 _start_of_vectors:
103
104 /* Machine check */
105 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
106
107 /* Data Storage exception. */
108 STD_EXCEPTION(0x300, DataStorage, UnknownException)
109
110 /* Instruction Storage exception. */
111 STD_EXCEPTION(0x400, InstStorage, UnknownException)
112
113 /* External Interrupt exception. */
114 STD_EXCEPTION(0x500, ExtInterrupt, external_interrupt)
115
116 /* Alignment exception. */
117 . = 0x600
118 Alignment:
119 EXCEPTION_PROLOG
120 mfspr r4,DAR
121 stw r4,_DAR(r21)
122 mfspr r5,DSISR
123 stw r5,_DSISR(r21)
124 addi r3,r1,STACK_FRAME_OVERHEAD
125 li r20,MSR_KERNEL
126 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
127 lwz r6,GOT(transfer_to_handler)
128 mtlr r6
129 blrl
130 .L_Alignment:
131 .long AlignmentException - _start + EXC_OFF_SYS_RESET
132 .long int_return - _start + EXC_OFF_SYS_RESET
133
134 /* Program check exception */
135 . = 0x700
136 ProgramCheck:
137 EXCEPTION_PROLOG
138 addi r3,r1,STACK_FRAME_OVERHEAD
139 li r20,MSR_KERNEL
140 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
141 lwz r6,GOT(transfer_to_handler)
142 mtlr r6
143 blrl
144 .L_ProgramCheck:
145 .long ProgramCheckException - _start + EXC_OFF_SYS_RESET
146 .long int_return - _start + EXC_OFF_SYS_RESET
147
148 STD_EXCEPTION(0x800, FPUnavailable, UnknownException)
149
150 /* I guess we could implement decrementer, and may have
151 * to someday for timekeeping.
152 */
153 STD_EXCEPTION(0x900, Decrementer, timer_interrupt)
154 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
155 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
156 STD_EXCEPTION(0xc00, SystemCall, UnknownException)
157 STD_EXCEPTION(0xd00, SingleStep, UnknownException)
158 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
159 STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
160 STD_EXCEPTION(0x1000, SoftEmu, SoftEmuException)
161 STD_EXCEPTION(0x1100, InstructionTLBMiss, UnknownException)
162 STD_EXCEPTION(0x1200, DataTLBMiss, UnknownException)
163 STD_EXCEPTION(0x1300, InstructionTLBError, UnknownException)
164 STD_EXCEPTION(0x1400, DataTLBError, UnknownException)
165 STD_EXCEPTION(0x1500, Reserved5, UnknownException)
166 STD_EXCEPTION(0x1600, Reserved6, UnknownException)
167 STD_EXCEPTION(0x1700, Reserved7, UnknownException)
168 STD_EXCEPTION(0x1800, Reserved8, UnknownException)
169 STD_EXCEPTION(0x1900, Reserved9, UnknownException)
170 STD_EXCEPTION(0x1a00, ReservedA, UnknownException)
171 STD_EXCEPTION(0x1b00, ReservedB, UnknownException)
172 STD_EXCEPTION(0x1c00, DataBreakpoint, UnknownException)
173 STD_EXCEPTION(0x1d00, InstructionBreakpoint, UnknownException)
174 STD_EXCEPTION(0x1e00, PeripheralBreakpoint, UnknownException)
175 STD_EXCEPTION(0x1f00, DevPortBreakpoint, UnknownException)
176
177 .globl _end_of_vectors
178 _end_of_vectors:
179
180 . = 0x2000
181
182 boot_cold:
183 boot_warm:
184
185 /* if this is a multi-core system we need to check which cpu
186 * this is, if it is not cpu 0 send the cpu to the linux reset
187 * vector */
188 #if (CONFIG_NUM_CPUS > 1)
189 mfspr r0, MSSCR0
190 andi. r0, r0, 0x0020
191 rlwinm r0,r0,27,31,31
192 mtspr PIR, r0
193 beq 1f
194
195 bl secondary_cpu_setup
196 #endif
197
198 /* disable everything */
199 1: li r0, 0
200 mtspr HID0, r0
201 sync
202 mtmsr 0
203 bl invalidate_bats
204 sync
205
206 #ifdef CFG_L2
207 /* init the L2 cache */
208 addis r3, r0, L2_INIT@h
209 ori r3, r3, L2_INIT@l
210 mtspr l2cr, r3
211 /* invalidate the L2 cache */
212 bl l2cache_invalidate
213 sync
214 #endif
215
216 /*
217 * Calculate absolute address in FLASH and jump there
218 *------------------------------------------------------*/
219 lis r3, CFG_MONITOR_BASE@h
220 ori r3, r3, CFG_MONITOR_BASE@l
221 addi r3, r3, in_flash - _start + EXC_OFF_SYS_RESET
222 mtlr r3
223 blr
224
225 in_flash:
226 /* let the C-code set up the rest */
227 /* */
228 /* Be careful to keep code relocatable ! */
229 /*------------------------------------------------------*/
230 /* perform low-level init */
231
232 /* enable extended addressing */
233 bl enable_ext_addr
234
235 /* setup the bats */
236 bl setup_bats
237 sync
238
239 #if (CFG_CCSRBAR_DEFAULT != CFG_CCSRBAR)
240 /* setup ccsrbar */
241 bl setup_ccsrbar
242 #endif
243
244
245 /* -- MPC8641 Rev 1.0 MCM Errata fixups -- */
246
247 /* skip fixups if not Rev 1.0 */
248 mfspr r4, SVR
249 rlwinm r4,r4,0,24,31
250 cmpwi r4,0x10
251 bne 1f
252
253 lis r3,MCM_ABCR@ha
254 lwz r4,MCM_ABCR@l(r3) /* ABCR -> r4 */
255
256 /* set ABCR[A_STRM_CNT] = 0 */
257 rlwinm r4,r4,0,0,29
258
259 /* set ABCR[ARB_POLICY] to 0x1 (round-robin) */
260 addi r0,r0,1
261 rlwimi r4,r0,12,18,19
262
263 stw r4,MCM_ABCR@l(r3) /* r4 -> ABCR */
264 sync
265
266 /* Set DBCR[ERD_DIS] */
267 lis r3,MCM_DBCR@ha
268 lwz r4,MCM_DBCR@l(r3)
269 oris r4, r4, 0x4000
270 stw r4,MCM_DBCR@l(r3)
271 sync
272 1:
273 /* setup the law entries */
274 bl law_entry
275 sync
276
277
278 #if (EMULATOR_RUN == 1)
279 /* On the emulator we want to adjust these ASAP */
280 /* otherwise things are sloooow */
281 /* Setup OR0 (LALE FIX)*/
282 lis r3, CFG_CCSRBAR@h
283 ori r3, r3, 0x5004
284 li r4, 0x0FF3
285 stw r4, 0(r3)
286 sync
287
288 /* Setup LCRR */
289 lis r3, CFG_CCSRBAR@h
290 ori r3, r3, 0x50D4
291 lis r4, 0x8000
292 ori r4, r4, 0x0002
293 stw r4, 0(r3)
294 sync
295 #endif
296 #if 1
297 /* make sure timer enabled in guts register too */
298 lis r3, CFG_CCSRBAR@h
299 oris r3,r3, 0xE
300 ori r3,r3,0x0070
301 lwz r4, 0(r3)
302 lis r5,0xFFFC
303 ori r5,r5,0x5FFF
304 and r4,r4,r5
305 stw r4,0(r3)
306 #endif
307 /*
308 * Cache must be enabled here for stack-in-cache trick.
309 * This means we need to enable the BATS.
310 * Cache should be turned on after BATs, since by default
311 * everything is write-through.
312 */
313
314 /* enable address translation */
315 bl enable_addr_trans
316 sync
317
318 /* enable and invalidate the data cache */
319 /* bl l1dcache_enable */
320 bl dcache_enable
321 sync
322
323 #if 1
324 bl icache_enable
325 #endif
326
327 #ifdef CFG_INIT_RAM_LOCK
328 bl lock_ram_in_cache
329 sync
330 #endif
331
332 /* set up the stack pointer in our newly created
333 * cache-ram (r1) */
334 lis r1, (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET)@h
335 ori r1, r1, (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET)@l
336
337 li r0, 0 /* Make room for stack frame header and */
338 stwu r0, -4(r1) /* clear final stack frame so that */
339 stwu r0, -4(r1) /* stack backtraces terminate cleanly */
340
341 GET_GOT /* initialize GOT access */
342
343 /* run low-level CPU init code (from Flash) */
344 bl cpu_init_f
345 sync
346
347 #ifdef RUN_DIAG
348
349 /* Sri: Code to run the diagnostic automatically */
350
351 /* Load PX_AUX register address in r4 */
352 lis r4, 0xf810
353 ori r4, r4, 0x6
354 /* Load contents of PX_AUX in r3 bits 24 to 31*/
355 lbz r3, 0(r4)
356
357 /* Mask and obtain the bit in r3 */
358 rlwinm. r3, r3, 0, 24, 24
359 /* If not zero, jump and continue with u-boot */
360 bne diag_done
361
362 /* Load back contents of PX_AUX in r3 bits 24 to 31 */
363 lbz r3, 0(r4)
364 /* Set the MSB of the register value */
365 ori r3, r3, 0x80
366 /* Write value in r3 back to PX_AUX */
367 stb r3, 0(r4)
368
369 /* Get the address to jump to in r3*/
370 lis r3, CFG_DIAG_ADDR@h
371 ori r3, r3, CFG_DIAG_ADDR@l
372
373 /* Load the LR with the branch address */
374 mtlr r3
375
376 /* Branch to diagnostic */
377 blr
378
379 diag_done:
380 #endif
381
382 /* bl l2cache_enable */
383 mr r3, r21
384
385 /* r3: BOOTFLAG */
386 /* run 1st part of board init code (from Flash) */
387 bl board_init_f
388 sync
389
390 /* NOTREACHED */
391
392 .globl invalidate_bats
393 invalidate_bats:
394
395 /* invalidate BATs */
396 mtspr IBAT0U, r0
397 mtspr IBAT1U, r0
398 mtspr IBAT2U, r0
399 mtspr IBAT3U, r0
400 mtspr IBAT4U, r0
401 mtspr IBAT5U, r0
402 mtspr IBAT6U, r0
403 mtspr IBAT7U, r0
404
405 isync
406 mtspr DBAT0U, r0
407 mtspr DBAT1U, r0
408 mtspr DBAT2U, r0
409 mtspr DBAT3U, r0
410 mtspr DBAT4U, r0
411 mtspr DBAT5U, r0
412 mtspr DBAT6U, r0
413 mtspr DBAT7U, r0
414
415 isync
416 sync
417 blr
418
419
420 /* setup_bats - set them up to some initial state */
421 .globl setup_bats
422 setup_bats:
423
424 addis r0, r0, 0x0000
425
426 /* IBAT 0 */
427 addis r4, r0, CFG_IBAT0L@h
428 ori r4, r4, CFG_IBAT0L@l
429 addis r3, r0, CFG_IBAT0U@h
430 ori r3, r3, CFG_IBAT0U@l
431 mtspr IBAT0L, r4
432 mtspr IBAT0U, r3
433 isync
434
435 /* DBAT 0 */
436 addis r4, r0, CFG_DBAT0L@h
437 ori r4, r4, CFG_DBAT0L@l
438 addis r3, r0, CFG_DBAT0U@h
439 ori r3, r3, CFG_DBAT0U@l
440 mtspr DBAT0L, r4
441 mtspr DBAT0U, r3
442 isync
443
444 /* IBAT 1 */
445 addis r4, r0, CFG_IBAT1L@h
446 ori r4, r4, CFG_IBAT1L@l
447 addis r3, r0, CFG_IBAT1U@h
448 ori r3, r3, CFG_IBAT1U@l
449 mtspr IBAT1L, r4
450 mtspr IBAT1U, r3
451 isync
452
453 /* DBAT 1 */
454 addis r4, r0, CFG_DBAT1L@h
455 ori r4, r4, CFG_DBAT1L@l
456 addis r3, r0, CFG_DBAT1U@h
457 ori r3, r3, CFG_DBAT1U@l
458 mtspr DBAT1L, r4
459 mtspr DBAT1U, r3
460 isync
461
462 /* IBAT 2 */
463 addis r4, r0, CFG_IBAT2L@h
464 ori r4, r4, CFG_IBAT2L@l
465 addis r3, r0, CFG_IBAT2U@h
466 ori r3, r3, CFG_IBAT2U@l
467 mtspr IBAT2L, r4
468 mtspr IBAT2U, r3
469 isync
470
471 /* DBAT 2 */
472 addis r4, r0, CFG_DBAT2L@h
473 ori r4, r4, CFG_DBAT2L@l
474 addis r3, r0, CFG_DBAT2U@h
475 ori r3, r3, CFG_DBAT2U@l
476 mtspr DBAT2L, r4
477 mtspr DBAT2U, r3
478 isync
479
480 /* IBAT 3 */
481 addis r4, r0, CFG_IBAT3L@h
482 ori r4, r4, CFG_IBAT3L@l
483 addis r3, r0, CFG_IBAT3U@h
484 ori r3, r3, CFG_IBAT3U@l
485 mtspr IBAT3L, r4
486 mtspr IBAT3U, r3
487 isync
488
489 /* DBAT 3 */
490 addis r4, r0, CFG_DBAT3L@h
491 ori r4, r4, CFG_DBAT3L@l
492 addis r3, r0, CFG_DBAT3U@h
493 ori r3, r3, CFG_DBAT3U@l
494 mtspr DBAT3L, r4
495 mtspr DBAT3U, r3
496 isync
497
498 /* IBAT 4 */
499 addis r4, r0, CFG_IBAT4L@h
500 ori r4, r4, CFG_IBAT4L@l
501 addis r3, r0, CFG_IBAT4U@h
502 ori r3, r3, CFG_IBAT4U@l
503 mtspr IBAT4L, r4
504 mtspr IBAT4U, r3
505 isync
506
507 /* DBAT 4 */
508 addis r4, r0, CFG_DBAT4L@h
509 ori r4, r4, CFG_DBAT4L@l
510 addis r3, r0, CFG_DBAT4U@h
511 ori r3, r3, CFG_DBAT4U@l
512 mtspr DBAT4L, r4
513 mtspr DBAT4U, r3
514 isync
515
516 /* IBAT 5 */
517 addis r4, r0, CFG_IBAT5L@h
518 ori r4, r4, CFG_IBAT5L@l
519 addis r3, r0, CFG_IBAT5U@h
520 ori r3, r3, CFG_IBAT5U@l
521 mtspr IBAT5L, r4
522 mtspr IBAT5U, r3
523 isync
524
525 /* DBAT 5 */
526 addis r4, r0, CFG_DBAT5L@h
527 ori r4, r4, CFG_DBAT5L@l
528 addis r3, r0, CFG_DBAT5U@h
529 ori r3, r3, CFG_DBAT5U@l
530 mtspr DBAT5L, r4
531 mtspr DBAT5U, r3
532 isync
533
534 /* IBAT 6 */
535 addis r4, r0, CFG_IBAT6L@h
536 ori r4, r4, CFG_IBAT6L@l
537 addis r3, r0, CFG_IBAT6U@h
538 ori r3, r3, CFG_IBAT6U@l
539 mtspr IBAT6L, r4
540 mtspr IBAT6U, r3
541 isync
542
543 /* DBAT 6 */
544 addis r4, r0, CFG_DBAT6L@h
545 ori r4, r4, CFG_DBAT6L@l
546 addis r3, r0, CFG_DBAT6U@h
547 ori r3, r3, CFG_DBAT6U@l
548 mtspr DBAT6L, r4
549 mtspr DBAT6U, r3
550 isync
551
552 /* IBAT 7 */
553 addis r4, r0, CFG_IBAT7L@h
554 ori r4, r4, CFG_IBAT7L@l
555 addis r3, r0, CFG_IBAT7U@h
556 ori r3, r3, CFG_IBAT7U@l
557 mtspr IBAT7L, r4
558 mtspr IBAT7U, r3
559 isync
560
561 /* DBAT 7 */
562 addis r4, r0, CFG_DBAT7L@h
563 ori r4, r4, CFG_DBAT7L@l
564 addis r3, r0, CFG_DBAT7U@h
565 ori r3, r3, CFG_DBAT7U@l
566 mtspr DBAT7L, r4
567 mtspr DBAT7U, r3
568 isync
569
570 1:
571 addis r3, 0, 0x0000
572 addis r5, 0, 0x4 /* upper bound of 0x00040000 for 7400/750 */
573 isync
574
575 tlblp:
576 tlbie r3
577 sync
578 addi r3, r3, 0x1000
579 cmp 0, 0, r3, r5
580 blt tlblp
581
582 blr
583
584 .globl enable_addr_trans
585 enable_addr_trans:
586 /* enable address translation */
587 mfmsr r5
588 ori r5, r5, (MSR_IR | MSR_DR)
589 mtmsr r5
590 isync
591 blr
592
593 .globl disable_addr_trans
594 disable_addr_trans:
595 /* disable address translation */
596 mflr r4
597 mfmsr r3
598 andi. r0, r3, (MSR_IR | MSR_DR)
599 beqlr
600 andc r3, r3, r0
601 mtspr SRR0, r4
602 mtspr SRR1, r3
603 rfi
604
605 /*
606 * This code finishes saving the registers to the exception frame
607 * and jumps to the appropriate handler for the exception.
608 * Register r21 is pointer into trap frame, r1 has new stack pointer.
609 */
610 .globl transfer_to_handler
611 transfer_to_handler:
612 stw r22,_NIP(r21)
613 lis r22,MSR_POW@h
614 andc r23,r23,r22
615 stw r23,_MSR(r21)
616 SAVE_GPR(7, r21)
617 SAVE_4GPRS(8, r21)
618 SAVE_8GPRS(12, r21)
619 SAVE_8GPRS(24, r21)
620 mflr r23
621 andi. r24,r23,0x3f00 /* get vector offset */
622 stw r24,TRAP(r21)
623 li r22,0
624 stw r22,RESULT(r21)
625 mtspr SPRG2,r22 /* r1 is now kernel sp */
626 lwz r24,0(r23) /* virtual address of handler */
627 lwz r23,4(r23) /* where to go when done */
628 mtspr SRR0,r24
629 mtspr SRR1,r20
630 mtlr r23
631 SYNC
632 rfi /* jump to handler, enable MMU */
633
634 int_return:
635 mfmsr r28 /* Disable interrupts */
636 li r4,0
637 ori r4,r4,MSR_EE
638 andc r28,r28,r4
639 SYNC /* Some chip revs need this... */
640 mtmsr r28
641 SYNC
642 lwz r2,_CTR(r1)
643 lwz r0,_LINK(r1)
644 mtctr r2
645 mtlr r0
646 lwz r2,_XER(r1)
647 lwz r0,_CCR(r1)
648 mtspr XER,r2
649 mtcrf 0xFF,r0
650 REST_10GPRS(3, r1)
651 REST_10GPRS(13, r1)
652 REST_8GPRS(23, r1)
653 REST_GPR(31, r1)
654 lwz r2,_NIP(r1) /* Restore environment */
655 lwz r0,_MSR(r1)
656 mtspr SRR0,r2
657 mtspr SRR1,r0
658 lwz r0,GPR0(r1)
659 lwz r2,GPR2(r1)
660 lwz r1,GPR1(r1)
661 SYNC
662 rfi
663
664 .globl dc_read
665 dc_read:
666 blr
667
668 .globl get_pvr
669 get_pvr:
670 mfspr r3, PVR
671 blr
672
673 .globl get_svr
674 get_svr:
675 mfspr r3, SVR
676 blr
677
678
679 /*
680 * Function: in8
681 * Description: Input 8 bits
682 */
683 .globl in8
684 in8:
685 lbz r3,0x0000(r3)
686 blr
687
688 /*
689 * Function: out8
690 * Description: Output 8 bits
691 */
692 .globl out8
693 out8:
694 stb r4,0x0000(r3)
695 blr
696
697 /*
698 * Function: out16
699 * Description: Output 16 bits
700 */
701 .globl out16
702 out16:
703 sth r4,0x0000(r3)
704 blr
705
706 /*
707 * Function: out16r
708 * Description: Byte reverse and output 16 bits
709 */
710 .globl out16r
711 out16r:
712 sthbrx r4,r0,r3
713 blr
714
715 /*
716 * Function: out32
717 * Description: Output 32 bits
718 */
719 .globl out32
720 out32:
721 stw r4,0x0000(r3)
722 blr
723
724 /*
725 * Function: out32r
726 * Description: Byte reverse and output 32 bits
727 */
728 .globl out32r
729 out32r:
730 stwbrx r4,r0,r3
731 blr
732
733 /*
734 * Function: in16
735 * Description: Input 16 bits
736 */
737 .globl in16
738 in16:
739 lhz r3,0x0000(r3)
740 blr
741
742 /*
743 * Function: in16r
744 * Description: Input 16 bits and byte reverse
745 */
746 .globl in16r
747 in16r:
748 lhbrx r3,r0,r3
749 blr
750
751 /*
752 * Function: in32
753 * Description: Input 32 bits
754 */
755 .globl in32
756 in32:
757 lwz 3,0x0000(3)
758 blr
759
760 /*
761 * Function: in32r
762 * Description: Input 32 bits and byte reverse
763 */
764 .globl in32r
765 in32r:
766 lwbrx r3,r0,r3
767 blr
768
769 /*
770 * Function: ppcDcbf
771 * Description: Data Cache block flush
772 * Input: r3 = effective address
773 * Output: none.
774 */
775 .globl ppcDcbf
776 ppcDcbf:
777 dcbf r0,r3
778 blr
779
780 /*
781 * Function: ppcDcbi
782 * Description: Data Cache block Invalidate
783 * Input: r3 = effective address
784 * Output: none.
785 */
786 .globl ppcDcbi
787 ppcDcbi:
788 dcbi r0,r3
789 blr
790
791 /*
792 * Function: ppcDcbz
793 * Description: Data Cache block zero.
794 * Input: r3 = effective address
795 * Output: none.
796 */
797 .globl ppcDcbz
798 ppcDcbz:
799 dcbz r0,r3
800 blr
801
802 /*
803 * Function: ppcSync
804 * Description: Processor Synchronize
805 * Input: none.
806 * Output: none.
807 */
808 .globl ppcSync
809 ppcSync:
810 sync
811 blr
812
813 /*
814 * void relocate_code (addr_sp, gd, addr_moni)
815 *
816 * This "function" does not return, instead it continues in RAM
817 * after relocating the monitor code.
818 *
819 * r3 = dest
820 * r4 = src
821 * r5 = length in bytes
822 * r6 = cachelinesize
823 */
824 .globl relocate_code
825 relocate_code:
826
827 mr r1, r3 /* Set new stack pointer */
828 mr r9, r4 /* Save copy of Global Data pointer */
829 mr r29, r9 /* Save for DECLARE_GLOBAL_DATA_PTR */
830 mr r10, r5 /* Save copy of Destination Address */
831
832 mr r3, r5 /* Destination Address */
833 lis r4, CFG_MONITOR_BASE@h /* Source Address */
834 ori r4, r4, CFG_MONITOR_BASE@l
835 lwz r5, GOT(__init_end)
836 sub r5, r5, r4
837 li r6, CFG_CACHELINE_SIZE /* Cache Line Size */
838
839 /*
840 * Fix GOT pointer:
841 *
842 * New GOT-PTR = (old GOT-PTR - CFG_MONITOR_BASE) + Destination Address
843 *
844 * Offset:
845 */
846 sub r15, r10, r4
847
848 /* First our own GOT */
849 add r14, r14, r15
850 /* then the one used by the C code */
851 add r30, r30, r15
852
853 /*
854 * Now relocate code
855 */
856 #ifdef CONFIG_ECC
857 bl board_relocate_rom
858 sync
859 mr r3, r10 /* Destination Address */
860 lis r4, CFG_MONITOR_BASE@h /* Source Address */
861 ori r4, r4, CFG_MONITOR_BASE@l
862 lwz r5, GOT(__init_end)
863 sub r5, r5, r4
864 li r6, CFG_CACHELINE_SIZE /* Cache Line Size */
865 #else
866 cmplw cr1,r3,r4
867 addi r0,r5,3
868 srwi. r0,r0,2
869 beq cr1,4f /* In place copy is not necessary */
870 beq 7f /* Protect against 0 count */
871 mtctr r0
872 bge cr1,2f
873
874 la r8,-4(r4)
875 la r7,-4(r3)
876 1: lwzu r0,4(r8)
877 stwu r0,4(r7)
878 bdnz 1b
879 b 4f
880
881 2: slwi r0,r0,2
882 add r8,r4,r0
883 add r7,r3,r0
884 3: lwzu r0,-4(r8)
885 stwu r0,-4(r7)
886 bdnz 3b
887 #endif
888 /*
889 * Now flush the cache: note that we must start from a cache aligned
890 * address. Otherwise we might miss one cache line.
891 */
892 4: cmpwi r6,0
893 add r5,r3,r5
894 beq 7f /* Always flush prefetch queue in any case */
895 subi r0,r6,1
896 andc r3,r3,r0
897 mr r4,r3
898 5: dcbst 0,r4
899 add r4,r4,r6
900 cmplw r4,r5
901 blt 5b
902 sync /* Wait for all dcbst to complete on bus */
903 mr r4,r3
904 6: icbi 0,r4
905 add r4,r4,r6
906 cmplw r4,r5
907 blt 6b
908 7: sync /* Wait for all icbi to complete on bus */
909 isync
910
911 /*
912 * We are done. Do not return, instead branch to second part of board
913 * initialization, now running from RAM.
914 */
915 addi r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
916 mtlr r0
917 blr
918
919 in_ram:
920 #ifdef CONFIG_ECC
921 bl board_init_ecc
922 #endif
923 /*
924 * Relocation Function, r14 point to got2+0x8000
925 *
926 * Adjust got2 pointers, no need to check for 0, this code
927 * already puts a few entries in the table.
928 */
929 li r0,__got2_entries@sectoff@l
930 la r3,GOT(_GOT2_TABLE_)
931 lwz r11,GOT(_GOT2_TABLE_)
932 mtctr r0
933 sub r11,r3,r11
934 addi r3,r3,-4
935 1: lwzu r0,4(r3)
936 add r0,r0,r11
937 stw r0,0(r3)
938 bdnz 1b
939
940 /*
941 * Now adjust the fixups and the pointers to the fixups
942 * in case we need to move ourselves again.
943 */
944 2: li r0,__fixup_entries@sectoff@l
945 lwz r3,GOT(_FIXUP_TABLE_)
946 cmpwi r0,0
947 mtctr r0
948 addi r3,r3,-4
949 beq 4f
950 3: lwzu r4,4(r3)
951 lwzux r0,r4,r11
952 add r0,r0,r11
953 stw r10,0(r3)
954 stw r0,0(r4)
955 bdnz 3b
956 4:
957 /* clear_bss: */
958 /*
959 * Now clear BSS segment
960 */
961 lwz r3,GOT(__bss_start)
962 lwz r4,GOT(_end)
963
964 cmplw 0, r3, r4
965 beq 6f
966
967 li r0, 0
968 5:
969 stw r0, 0(r3)
970 addi r3, r3, 4
971 cmplw 0, r3, r4
972 bne 5b
973 6:
974 mr r3, r9 /* Init Date pointer */
975 mr r4, r10 /* Destination Address */
976 bl board_init_r
977
978 /* not reached - end relocate_code */
979 /*-----------------------------------------------------------------------*/
980
981 /*
982 * Copy exception vector code to low memory
983 *
984 * r3: dest_addr
985 * r7: source address, r8: end address, r9: target address
986 */
987 .globl trap_init
988 trap_init:
989 lwz r7, GOT(_start)
990 lwz r8, GOT(_end_of_vectors)
991
992 li r9, 0x100 /* reset vector always at 0x100 */
993
994 cmplw 0, r7, r8
995 bgelr /* return if r7>=r8 - just in case */
996
997 mflr r4 /* save link register */
998 1:
999 lwz r0, 0(r7)
1000 stw r0, 0(r9)
1001 addi r7, r7, 4
1002 addi r9, r9, 4
1003 cmplw 0, r7, r8
1004 bne 1b
1005
1006 /*
1007 * relocate `hdlr' and `int_return' entries
1008 */
1009 li r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
1010 li r8, Alignment - _start + EXC_OFF_SYS_RESET
1011 2:
1012 bl trap_reloc
1013 addi r7, r7, 0x100 /* next exception vector */
1014 cmplw 0, r7, r8
1015 blt 2b
1016
1017 li r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
1018 bl trap_reloc
1019
1020 li r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
1021 bl trap_reloc
1022
1023 li r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
1024 li r8, SystemCall - _start + EXC_OFF_SYS_RESET
1025 3:
1026 bl trap_reloc
1027 addi r7, r7, 0x100 /* next exception vector */
1028 cmplw 0, r7, r8
1029 blt 3b
1030
1031 li r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
1032 li r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
1033 4:
1034 bl trap_reloc
1035 addi r7, r7, 0x100 /* next exception vector */
1036 cmplw 0, r7, r8
1037 blt 4b
1038
1039 /* enable execptions from RAM vectors */
1040 mfmsr r7
1041 li r8,MSR_IP
1042 andc r7,r7,r8
1043 mtmsr r7
1044
1045 mtlr r4 /* restore link register */
1046 blr
1047
1048 /*
1049 * Function: relocate entries for one exception vector
1050 */
1051 trap_reloc:
1052 lwz r0, 0(r7) /* hdlr ... */
1053 add r0, r0, r3 /* ... += dest_addr */
1054 stw r0, 0(r7)
1055
1056 lwz r0, 4(r7) /* int_return ... */
1057 add r0, r0, r3 /* ... += dest_addr */
1058 stw r0, 4(r7)
1059
1060 sync
1061 isync
1062
1063 blr
1064
1065 .globl enable_ext_addr
1066 enable_ext_addr:
1067 mfspr r0, HID0
1068 lis r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@h
1069 ori r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@l
1070 mtspr HID0, r0
1071 sync
1072 isync
1073 blr
1074
1075 #if (CFG_CCSRBAR_DEFAULT != CFG_CCSRBAR)
1076 .globl setup_ccsrbar
1077 setup_ccsrbar:
1078 /* Special sequence needed to update CCSRBAR itself */
1079 lis r4, CFG_CCSRBAR_DEFAULT@h
1080 ori r4, r4, CFG_CCSRBAR_DEFAULT@l
1081
1082 lis r5, CFG_CCSRBAR@h
1083 ori r5, r5, CFG_CCSRBAR@l
1084 srwi r6,r5,12
1085 stw r6, 0(r4)
1086 isync
1087
1088 lis r5, 0xffff
1089 ori r5,r5,0xf000
1090 lwz r5, 0(r5)
1091 isync
1092
1093 lis r3, CFG_CCSRBAR@h
1094 lwz r5, CFG_CCSRBAR@l(r3)
1095 isync
1096
1097 blr
1098 #endif
1099
1100 #ifdef CFG_INIT_RAM_LOCK
1101 lock_ram_in_cache:
1102 /* Allocate Initial RAM in data cache.
1103 */
1104 lis r3, (CFG_INIT_RAM_ADDR & ~31)@h
1105 ori r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l
1106 li r2, ((CFG_INIT_RAM_END & ~31) + \
1107 (CFG_INIT_RAM_ADDR & 31) + 31) / 32
1108 mtctr r2
1109 1:
1110 dcbz r0, r3
1111 addi r3, r3, 32
1112 bdnz 1b
1113 #if 1
1114 /* Lock the data cache */
1115 mfspr r0, HID0
1116 ori r0, r0, 0x1000
1117 sync
1118 mtspr HID0, r0
1119 sync
1120 blr
1121 #endif
1122 #if 0
1123 /* Lock the first way of the data cache */
1124 mfspr r0, LDSTCR
1125 ori r0, r0, 0x0080
1126 #if defined(CONFIG_ALTIVEC)
1127 dssall
1128 #endif
1129 sync
1130 mtspr LDSTCR, r0
1131 sync
1132 isync
1133 blr
1134 #endif
1135
1136 .globl unlock_ram_in_cache
1137 unlock_ram_in_cache:
1138 /* invalidate the INIT_RAM section */
1139 lis r3, (CFG_INIT_RAM_ADDR & ~31)@h
1140 ori r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l
1141 li r2, ((CFG_INIT_RAM_END & ~31) + \
1142 (CFG_INIT_RAM_ADDR & 31) + 31) / 32
1143 mtctr r2
1144 1: icbi r0, r3
1145 addi r3, r3, 32
1146 bdnz 1b
1147 sync /* Wait for all icbi to complete on bus */
1148 isync
1149 #if 1
1150 /* Unlock the data cache and invalidate it */
1151 mfspr r0, HID0
1152 li r3,0x1000
1153 andc r0,r0,r3
1154 li r3,0x0400
1155 or r0,r0,r3
1156 sync
1157 mtspr HID0, r0
1158 sync
1159 blr
1160 #endif
1161 #if 0
1162 /* Unlock the first way of the data cache */
1163 mfspr r0, LDSTCR
1164 li r3,0x0080
1165 andc r0,r0,r3
1166 #ifdef CONFIG_ALTIVEC
1167 dssall
1168 #endif
1169 sync
1170 mtspr LDSTCR, r0
1171 sync
1172 isync
1173 li r3,0x0400
1174 or r0,r0,r3
1175 sync
1176 mtspr HID0, r0
1177 sync
1178 blr
1179 #endif
1180 #endif
1181
1182 /* If this is a multi-cpu system then we need to handle the
1183 * 2nd cpu. The assumption is that the 2nd cpu is being
1184 * held in boot holdoff mode until the 1st cpu unlocks it
1185 * from Linux. We'll do some basic cpu init and then pass
1186 * it to the Linux Reset Vector.
1187 * Sri: Much of this initialization is not required. Linux
1188 * rewrites the bats, and the sprs and also enables the L1 cache.
1189 */
1190 #if (CONFIG_NUM_CPUS > 1)
1191 .globl secondary_cpu_setup
1192 secondary_cpu_setup:
1193 /* Do only core setup on all cores except cpu0 */
1194 bl invalidate_bats
1195 sync
1196 bl enable_ext_addr
1197
1198 #ifdef CFG_L2
1199 /* init the L2 cache */
1200 addis r3, r0, L2_INIT@h
1201 ori r3, r3, L2_INIT@l
1202 sync
1203 mtspr l2cr, r3
1204 #ifdef CONFIG_ALTIVEC
1205 dssall
1206 #endif
1207 /* invalidate the L2 cache */
1208 bl l2cache_invalidate
1209 sync
1210 #endif
1211
1212 /* enable and invalidate the data cache */
1213 bl dcache_enable
1214 sync
1215
1216 /* enable and invalidate the instruction cache*/
1217 bl icache_enable
1218 sync
1219
1220 /* TBEN in HID0 */
1221 mfspr r4, HID0
1222 oris r4, r4, 0x0400
1223 mtspr HID0, r4
1224 sync
1225 isync
1226
1227 /*SYNCBE|ABE in HID1*/
1228 mfspr r4, HID1
1229 ori r4, r4, 0x0C00
1230 mtspr HID1, r4
1231 sync
1232 isync
1233
1234 lis r3, CONFIG_LINUX_RESET_VEC@h
1235 ori r3, r3, CONFIG_LINUX_RESET_VEC@l
1236 mtlr r3
1237 blr
1238
1239 /* Never Returns, Running in Linux Now */
1240 #endif