/*
 * (web-capture header, kept as a comment so the file assembles)
 * Source: git.ipfire.org - people/ms/u-boot.git - arch/powerpc/cpu/mpc85xx/start.S
 * Commit subject: powerpc/mpc85xx: SECURE BOOT - Add secure boot target for B4860QDS
 */
1 /*
2 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
3 * Copyright (C) 2003 Motorola,Inc.
4 *
5 * SPDX-License-Identifier: GPL-2.0+
6 */
7
8 /* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
9 *
10 * The processor starts at 0xfffffffc and the code is first executed in the
11 * last 4K page(0xfffff000-0xffffffff) in flash/rom.
12 *
13 */
14
15 #include <asm-offsets.h>
16 #include <config.h>
17 #include <mpc85xx.h>
18 #include <version.h>
19
20 #include <ppc_asm.tmpl>
21 #include <ppc_defs.h>
22
23 #include <asm/cache.h>
24 #include <asm/mmu.h>
25
/* MSR used while running U-Boot "kernel" code: machine-check enable only;
 * external/critical interrupts stay masked throughout early boot. */
26 #undef MSR_KERNEL
27 #define MSR_KERNEL ( MSR_ME ) /* Machine Check enable only */
28
/* MINIMAL_SPL: stripped-down SPL image — no exception vectors and a
 * reduced GOT (see START_GOT below). */
29 #if defined(CONFIG_NAND_SPL) || \
30 (defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
31 #define MINIMAL_SPL
32 #endif
33
/* NOR_BOOT: classic boot from NOR flash at the top of the address map;
 * excluded for SPL, RAM-boot, secure-boot and SRIO/PCIE slave boot. */
34 #if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
35 !defined(CONFIG_SECURE_BOOT) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
36 #define NOR_BOOT
37 #endif
38
39 /*
40 * Set up GOT: Global Offset Table
41 *
42 * Use r12 to access the GOT
43 */
44 START_GOT
45 GOT_ENTRY(_GOT2_TABLE_)
46 GOT_ENTRY(_FIXUP_TABLE_)
47
/* Vector-related entries are only needed when the full exception
 * vectors are linked in (i.e. not a minimal SPL). */
48 #ifndef MINIMAL_SPL
49 GOT_ENTRY(_start)
50 GOT_ENTRY(_start_of_vectors)
51 GOT_ENTRY(_end_of_vectors)
52 GOT_ENTRY(transfer_to_handler)
53 #endif
54
/* Section boundaries used by relocation and BSS clearing. */
55 GOT_ENTRY(__init_end)
56 GOT_ENTRY(__bss_end)
57 GOT_ENTRY(__bss_start)
58 END_GOT
59
60 /*
61 * e500 Startup -- after reset only the last 4KB of the effective
62 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
63 * section is located at THIS LAST page and basically does three
64 * things: clear some registers, set up exception tables and
65 * add more TLB entries for 'larger spaces'(e.g. the boot rom) to
66 * continue the boot procedure.
67
68 * Once the boot rom is mapped by TLB entries we can proceed
69 * with normal startup.
70 *
71 */
72
73 .section .bootpg,"ax"
74 .globl _start_e500
75
76 _start_e500:
77 /* Enable debug exception */
78 li r1,MSR_DE
79 mtmsr r1
80
81 /*
82 * If we got an ePAPR device tree pointer passed in as r3, we need that
83 * later in cpu_init_early_f(). Save it to a safe register before we
84 * clobber it so that we can fetch it from there later.
85 */
86 mr r24, r3
87
88 #ifdef CONFIG_SYS_FSL_ERRATUM_A004510
/* Check the SVR revision to see whether this part is affected by
 * erratum A004510; r27 records the result (1 = affected) and is
 * consumed later when the boot TLB entry is rewritten. */
89 mfspr r3,SPRN_SVR
90 rlwinm r3,r3,0,0xff
91 li r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
92 cmpw r3,r4
93 beq 1f
94
95 #ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
96 li r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
97 cmpw r3,r4
98 beq 1f
99 #endif
100
101 /* Not a supported revision affected by erratum */
102 li r27,0
103 b 2f
104
105 1: li r27,1 /* Remember for later that we have the erratum */
106 /* Erratum says set bits 55:60 to 001001 */
/* NOTE(review): HDBCR0 is an implementation-specific debug control
 * register; the bit values below come from the erratum document. */
107 msync
108 isync
109 mfspr r3,SPRN_HDBCR0
110 li r4,0x48
111 rlwimi r3,r4,0,0x1f8
112 mtspr SPRN_HDBCR0,r3
113 isync
114 2:
115 #endif
116 #ifdef CONFIG_SYS_FSL_ERRATUM_A005125
117 msync
118 isync
119 mfspr r3, SPRN_HDBCR0
120 oris r3, r3, 0x0080
121 mtspr SPRN_HDBCR0, r3
122 #endif
123
124
125 #if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC) && \
126 !defined(CONFIG_E6500)
127 /* ISBC uses L2 as stack.
128 * Disable L2 cache here so that u-boot can enable it later
129 * as part of its normal flow
130 */
131
132 /* Check if L2 is enabled */
133 mfspr r3, SPRN_L2CSR0
134 lis r2, L2CSR0_L2E@h
135 ori r2, r2, L2CSR0_L2E@l
136 and. r4, r3, r2
137 beq l2_disabled
138
139 mfspr r3, SPRN_L2CSR0
140 /* Flush L2 cache */
141 lis r2,(L2CSR0_L2FL)@h
142 ori r2, r2, (L2CSR0_L2FL)@l
143 or r3, r2, r3
144 sync
145 isync
146 mtspr SPRN_L2CSR0,r3
147 isync
/* L2FL is self-clearing: spin until the flush completes. */
148 1:
149 mfspr r3, SPRN_L2CSR0
150 and. r1, r3, r2
151 bne 1b
152
/* Clear L2E to disable the L2 cache. */
153 mfspr r3, SPRN_L2CSR0
154 lis r2, L2CSR0_L2E@h
155 ori r2, r2, L2CSR0_L2E@l
156 andc r4, r3, r2
157 sync
158 isync
159 mtspr SPRN_L2CSR0,r4
160 isync
161
162 l2_disabled:
163 #endif
164
165 /* clear registers/arrays not reset by hardware */
166
167 /* L1 */
168 li r0,2
169 mtspr L1CSR0,r0 /* invalidate d-cache */
170 mtspr L1CSR1,r0 /* invalidate i-cache */
171
/* DBSR bits are write-1-to-clear, so writing the register back to
 * itself clears every pending debug status bit. */
172 mfspr r1,DBSR
173 mtspr DBSR,r1 /* Clear all valid bits */
174
175
/*
 * create_tlb1_entry — write one TLB1 (variable-size) entry.
 *   esel     : entry index within TLB1
 *   ts       : address space (MAS1[TS])
 *   tsize    : BOOKE_PAGESZ_* page size
 *   epn/wimg : effective page number and cache attributes (MAS2)
 *   rpn/perm : real page number and access permissions (MAS3)
 *   phy_high : upper physical address bits (MAS7, >32-bit phys)
 *   scratch  : GPR clobbered while staging the MAS values
 * Entries are created IPROT=1 (protected from tlbivax invalidation).
 */
176 .macro create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
177 lis \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
178 ori \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
179 mtspr MAS0, \scratch
180 lis \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
181 ori \scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
182 mtspr MAS1, \scratch
183 lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
184 ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
185 mtspr MAS2, \scratch
186 lis \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
187 ori \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
188 mtspr MAS3, \scratch
189 lis \scratch, \phy_high@h
190 ori \scratch, \scratch, \phy_high@l
191 mtspr MAS7, \scratch
/* isync/msync before tlbwe, isync after: required ordering for
 * MMU updates on Book-E cores. */
192 isync
193 msync
194 tlbwe
195 isync
196 .endm
197
/*
 * create_tlb0_entry — write one TLB0 (fixed 4K set-associative) entry.
 * Same arguments as create_tlb1_entry, but MAS0[TLBSEL]=0 and the
 * entry is created with IPROT=0, so it can be removed by tlbivax.
 * Used for short-lived mappings (temporary CCSR windows etc.).
 */
198 .macro create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
199 lis \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
200 ori \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
201 mtspr MAS0, \scratch
202 lis \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
203 ori \scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
204 mtspr MAS1, \scratch
205 lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
206 ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
207 mtspr MAS2, \scratch
208 lis \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
209 ori \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
210 mtspr MAS3, \scratch
211 lis \scratch, \phy_high@h
212 ori \scratch, \scratch, \phy_high@l
213 mtspr MAS7, \scratch
214 isync
215 msync
216 tlbwe
217 isync
218 .endm
219
/*
 * delete_tlb1_entry — invalidate TLB1 entry \esel by writing it back
 * with MAS1=0 (V=0). \scratch is clobbered.
 */
220 .macro delete_tlb1_entry esel scratch
221 lis \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
222 ori \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
223 mtspr MAS0, \scratch
224 li \scratch, 0
225 mtspr MAS1, \scratch
226 isync
227 msync
228 tlbwe
229 isync
230 .endm
231
/*
 * delete_tlb0_entry — invalidate TLB0 entry \esel (MAS1=0).
 * MAS2 must also be reloaded with the entry's EPN/WIMG because TLB0
 * is set-associative: the EPN selects which set the write hits.
 * \scratch is clobbered.
 */
232 .macro delete_tlb0_entry esel epn wimg scratch
233 lis \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
234 ori \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
235 mtspr MAS0, \scratch
236 li \scratch, 0
237 mtspr MAS1, \scratch
238 lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
239 ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
240 mtspr MAS2, \scratch
241 isync
242 msync
243 tlbwe
244 isync
245 .endm
246
247 /* Interrupt vectors do not fit in minimal SPL. */
248 #if !defined(MINIMAL_SPL)
249 /* Setup interrupt vectors */
/* IVPR holds the 64K-aligned vector base; each IVORn below is the
 * offset of one handler from that base. */
250 lis r1,CONFIG_SYS_MONITOR_BASE@h
251 mtspr IVPR,r1
252
/* r3 = low 16 bits of the monitor base, i.e. the offset of _start
 * within the IVPR page (the @h of a value masked to 0xffff is 0). */
253 lis r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@h
254 ori r3,r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@l
255
256 addi r4,r3,CriticalInput - _start + _START_OFFSET
257 mtspr IVOR0,r4 /* 0: Critical input */
258 addi r4,r3,MachineCheck - _start + _START_OFFSET
259 mtspr IVOR1,r4 /* 1: Machine check */
260 addi r4,r3,DataStorage - _start + _START_OFFSET
261 mtspr IVOR2,r4 /* 2: Data storage */
262 addi r4,r3,InstStorage - _start + _START_OFFSET
263 mtspr IVOR3,r4 /* 3: Instruction storage */
264 addi r4,r3,ExtInterrupt - _start + _START_OFFSET
265 mtspr IVOR4,r4 /* 4: External interrupt */
266 addi r4,r3,Alignment - _start + _START_OFFSET
267 mtspr IVOR5,r4 /* 5: Alignment */
268 addi r4,r3,ProgramCheck - _start + _START_OFFSET
269 mtspr IVOR6,r4 /* 6: Program check */
270 addi r4,r3,FPUnavailable - _start + _START_OFFSET
271 mtspr IVOR7,r4 /* 7: floating point unavailable */
272 addi r4,r3,SystemCall - _start + _START_OFFSET
273 mtspr IVOR8,r4 /* 8: System call */
274 /* 9: Auxiliary processor unavailable(unsupported) */
275 addi r4,r3,Decrementer - _start + _START_OFFSET
276 mtspr IVOR10,r4 /* 10: Decrementer */
277 addi r4,r3,IntervalTimer - _start + _START_OFFSET
278 mtspr IVOR11,r4 /* 11: Interval timer */
279 addi r4,r3,WatchdogTimer - _start + _START_OFFSET
280 mtspr IVOR12,r4 /* 12: Watchdog timer */
281 addi r4,r3,DataTLBError - _start + _START_OFFSET
282 mtspr IVOR13,r4 /* 13: Data TLB error */
283 addi r4,r3,InstructionTLBError - _start + _START_OFFSET
284 mtspr IVOR14,r4 /* 14: Instruction TLB error */
285 addi r4,r3,DebugBreakpoint - _start + _START_OFFSET
286 mtspr IVOR15,r4 /* 15: Debug */
287 #endif
288
289 /* Clear and set up some registers. */
290 li r0,0x0000
291 lis r1,0xffff
292 mtspr DEC,r0 /* prevent dec exceptions */
293 mttbl r0 /* prevent fit & wdt exceptions */
294 mttbu r0
295 mtspr TSR,r1 /* clear all timer exception status (w1c) */
296 mtspr TCR,r0 /* disable all */
297 mtspr ESR,r0 /* clear exception syndrome register */
298 mtspr MCSR,r0 /* machine check syndrome register */
299 mtxer r0 /* clear integer exception register */
300
301 #ifdef CONFIG_SYS_BOOK3E_HV
/* Hypervisor-capable cores: MAS8 selects the logical partition for
 * TLB writes; keep it zero so entries land in the right partition. */
302 mtspr MAS8,r0 /* make sure MAS8 is clear */
303 #endif
304
305 /* Enable Time Base and Select Time Base Clock */
306 lis r0,HID0_EMCP@h /* Enable machine check */
307 #if defined(CONFIG_ENABLE_36BIT_PHYS)
308 ori r0,r0,HID0_ENMAS7@l /* Enable MAS7 */
309 #endif
/* e500mc keeps its timebase enable elsewhere, so TBEN only applies
 * to earlier e500 cores. */
310 #ifndef CONFIG_E500MC
311 ori r0,r0,HID0_TBEN@l /* Enable Timebase */
312 #endif
313 mtspr HID0,r0
314
315 #ifndef CONFIG_E500MC
316 li r0,(HID1_ASTME|HID1_ABE)@l /* Addr streaming & broadcast */
317 mfspr r3,PVR
318 andi. r3,r3, 0xff
319 cmpwi r3,0x50@l /* if we are rev 5.0 or greater set MBDD */
320 blt 1f
321 /* Set MBDD bit also */
322 ori r0, r0, HID1_MBDD@l
323 1:
324 mtspr HID1,r0
325 #endif
326
327 #ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
/* NOTE(review): HDBCR1 bit per the A003999 erratum document. */
328 mfspr r3,SPRN_HDBCR1
329 oris r3,r3,0x0100
330 mtspr SPRN_HDBCR1,r3
331 #endif
332
333 /* Enable Branch Prediction */
334 #if defined(CONFIG_BTB)
335 lis r0,BUCSR_ENABLE@h
336 ori r0,r0,BUCSR_ENABLE@l
337 mtspr SPRN_BUCSR,r0
338 #endif
339
340 #if defined(CONFIG_SYS_INIT_DBCR)
341 lis r1,0xffff
342 ori r1,r1,0xffff
343 mtspr DBSR,r1 /* Clear all status bits */
344 lis r0,CONFIG_SYS_INIT_DBCR@h /* DBCR0[IDM] must be set */
345 ori r0,r0,CONFIG_SYS_INIT_DBCR@l
346 mtspr DBCR0,r0
347 #endif
348
349 #ifdef CONFIG_MPC8569
350 #define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
351 #define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)
352
353 /* MPC8569 Rev.0 silicon needs to set bit 13 of LBCR to allow eLBC to
354 * use address space which is more than 12bits, and it must be done in
355 * the 4K boot page. So we set this bit here.
356 */
357
358 /* create a temp mapping TLB0[0] for LBCR */
359 create_tlb0_entry 0, \
360 0, BOOKE_PAGESZ_4K, \
361 CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G, \
362 CONFIG_SYS_LBC_ADDR, MAS3_SW|MAS3_SR, \
363 0, r6
364
365 /* Set LBCR register */
366 lis r4,CONFIG_SYS_LBCR_ADDR@h
367 ori r4,r4,CONFIG_SYS_LBCR_ADDR@l
368
369 lis r5,CONFIG_SYS_LBC_LBCR@h
370 ori r5,r5,CONFIG_SYS_LBC_LBCR@l
371 stw r5,0(r4)
372 isync
373
/* The temp entry was created with IPROT=0, so tlbivax can remove it. */
374 /* invalidate this temp TLB */
375 lis r4,CONFIG_SYS_LBC_ADDR@h
376 ori r4,r4,CONFIG_SYS_LBC_ADDR@l
377 tlbivax 0,r4
378 isync
379
380 #endif /* CONFIG_MPC8569 */
381
382 /*
383 * Search for the TLB that covers the code we're executing, and shrink it
384 * so that it covers only this 4K page. That will ensure that any other
385 * TLB we create won't interfere with it. We assume that the TLB exists,
386 * which is why we don't check the Valid bit of MAS1. We also assume
387 * it is in TLB1.
388 *
389 * This is necessary, for example, when booting from the on-chip ROM,
390 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
391 */
392 bl nexti /* Find our address */
393 nexti: mflr r1 /* R1 = our PC */
394 li r2, 0
395 mtspr MAS6, r2 /* Assume the current PID and AS are 0 */
396 isync
397 msync
398 tlbsx 0, r1 /* This must succeed */
399
/* r14 = ESEL of the entry we execute from; it stays live for the
 * rest of boot so later code can avoid/clean up this entry. */
400 mfspr r14, MAS0 /* Save ESEL for later */
401 rlwinm r14, r14, 16, 0xfff
402
403 /* Set the size of the TLB to 4KB */
404 mfspr r3, MAS1
405 li r2, 0xF80
406 andc r3, r3, r2 /* Clear the TSIZE bits */
407 ori r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
408 oris r3, r3, MAS1_IPROT@h
409 mtspr MAS1, r3
410
411 /*
412 * Set the base address of the TLB to our PC. We assume that
413 * virtual == physical. We also assume that MAS2_EPN == MAS3_RPN.
414 */
415 lis r3, MAS2_EPN@h
416 ori r3, r3, MAS2_EPN@l /* R3 = MAS2_EPN */
417
418 and r1, r1, r3 /* Our PC, rounded down to the nearest page */
419
420 mfspr r2, MAS2
421 andc r2, r2, r3
422 or r2, r2, r1
423 #ifdef CONFIG_SYS_FSL_ERRATUM_A004510
/* Erratum A004510: temporarily run cache-inhibited=off/guarded=on;
 * r15 preserves the original I/G bits so they can be restored after
 * the workaround writes are done. */
424 cmpwi r27,0
425 beq 1f
426 andi. r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */
427 rlwinm r2, r2, 0, ~MAS2_I
428 ori r2, r2, MAS2_G
429 1:
430 #endif
431 mtspr MAS2, r2 /* Set the EPN to our PC base address */
432
433 mfspr r2, MAS3
434 andc r2, r2, r3
435 or r2, r2, r1
436 mtspr MAS3, r2 /* Set the RPN to our PC base address */
437
438 isync
439 msync
440 tlbwe
441
442 /*
443 * Clear out any other TLB entries that may exist, to avoid conflicts.
444 * Our TLB entry is in r14.
445 */
446 li r0, TLBIVAX_ALL | TLBIVAX_TLB0
447 tlbivax 0, r0
448 tlbsync
449
/* Walk all TLB1 entries (count from TLB1CFG[NENTRY]) and write each
 * one invalid (MAS1=0), skipping the entry we execute from (r14). */
450 mfspr r4, SPRN_TLB1CFG
451 rlwinm r4, r4, 0, TLBnCFG_NENTRY_MASK
452
453 li r3, 0
454 mtspr MAS1, r3
455 1: cmpw r3, r14
456 rlwinm r5, r3, 16, MAS0_ESEL_MSK
457 addi r3, r3, 1
458 beq 2f /* skip the entry we're executing from */
459
460 oris r5, r5, MAS0_TLBSEL(1)@h
461 mtspr MAS0, r5
462
463 isync
464 tlbwe
465 isync
466 msync
467
468 2: cmpw r3, r4
469 blt 1b
470
471 #if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) && \
472 !defined(CONFIG_SECURE_BOOT)
473 /*
474 * TLB entry for debugging in AS1
475 * Create temporary TLB entry in AS0 to handle debug exception
476 * As on debug exception MSR is cleared i.e. Address space is changed
477 * to 0. A TLB entry (in AS0) is required to handle debug exception generated
478 * in AS1.
479 */
480
481 #ifdef NOR_BOOT
482 /*
483 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
484 * because flash's virtual address maps to 0xff800000 - 0xffffffff.
485 * and this window is outside of 4K boot window.
486 */
487 create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
488 0, BOOKE_PAGESZ_4M, \
489 CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
490 0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
491 0, r6
492
493 #else
494 /*
495 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
496 * because "nexti" will resize TLB to 4K
497 */
498 create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
499 0, BOOKE_PAGESZ_256K, \
500 CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \
501 CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
502 0, r6
503 #endif
504 #endif
505
506 /*
507 * Relocate CCSR, if necessary. We relocate CCSR if (obviously) the default
508 * location is not where we want it. This typically happens on a 36-bit
509 * system, where we want to move CCSR to near the top of 36-bit address space.
510 *
511 * To move CCSR, we create two temporary TLBs, one for the old location, and
512 * another for the new location. On CoreNet systems, we also need to create
513 * a special, temporary LAW.
514 *
515 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
516 * long-term TLBs, so we use TLB0 here.
517 *
518 * Register roles in this section: r8 = VA of new CCSR, r9 = VA of the
519 * old (default) CCSR; both stay live until delete_temp_tlbs.
520 */
518 #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
519
520 #if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
521 #error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW) must be defined."
522 #endif
523
524 create_ccsr_new_tlb:
525 /*
526 * Create a TLB for the new location of CCSR. Register R8 is reserved
527 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
528 */
529 lis r8, CONFIG_SYS_CCSRBAR@h
530 ori r8, r8, CONFIG_SYS_CCSRBAR@l
531 lis r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
532 ori r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
533 create_tlb0_entry 0, \
534 0, BOOKE_PAGESZ_4K, \
535 CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
536 CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
537 CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
538 /*
539 * Create a TLB for the current location of CCSR. Register R9 is reserved
540 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
541 */
542 create_ccsr_old_tlb:
543 create_tlb0_entry 1, \
544 0, BOOKE_PAGESZ_4K, \
545 CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
546 CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
547 0, r3 /* The default CCSR address is always a 32-bit number */
548
549
550 /*
551 * We have a TLB for what we think is the current (old) CCSR. Let's
552 * verify that, otherwise we won't be able to move it.
553 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
554 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
555 */
556 verify_old_ccsr:
557 lis r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
558 ori r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
559 #ifdef CONFIG_FSL_CORENET
560 lwz r1, 4(r9) /* CCSRBARL */
561 #else
/* Non-CoreNet CCSRBAR stores the base >> 12; shift back up before
 * comparing against the configured default. */
562 lwz r1, 0(r9) /* CCSRBAR, shifted right by 12 */
563 slwi r1, r1, 12
564 #endif
565
566 cmpl 0, r0, r1
567
568 /*
569 * If the value we read from CCSRBARL is not what we expect, then
570 * enter an infinite loop. This will at least allow a debugger to
571 * halt execution and examine TLBs, etc. There's no point in going
572 * on.
573 */
574 infinite_debug_loop:
575 bne infinite_debug_loop
576
577 #ifdef CONFIG_FSL_CORENET
578
579 #define CCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000)
580 #define LAW_EN 0x80000000
581 #define LAW_SIZE_4K 0xb
582 #define CCSRBAR_LAWAR (LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
583 #define CCSRAR_C 0x80000000 /* Commit */
584
585 create_temp_law:
586 /*
587 * On CoreNet systems, we create the temporary LAW using a special LAW
588 * target ID of 0x1e. LAWBARH is at offset 0xc00 in CCSR.
589 */
590 lis r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
591 ori r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
592 lis r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
593 ori r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
594 lis r2, CCSRBAR_LAWAR@h
595 ori r2, r2, CCSRBAR_LAWAR@l
596
597 stw r0, 0xc00(r9) /* LAWBARH0 */
598 stw r1, 0xc04(r9) /* LAWBARL0 */
599 sync
600 stw r2, 0xc08(r9) /* LAWAR0 */
601
602 /*
603 * Read back from LAWAR to ensure the update is complete. e500mc
604 * cores also require an isync.
605 */
606 lwz r0, 0xc08(r9) /* LAWAR0 */
607 isync
608
609 /*
610 * Read the current CCSRBARH and CCSRBARL using load word instructions.
611 * Follow this with an isync instruction. This forces any outstanding
612 * accesses to configuration space to completion.
613 */
614 read_old_ccsrbar:
615 lwz r0, 0(r9) /* CCSRBARH */
616 lwz r0, 4(r9) /* CCSRBARL */
617 isync
618
619 /*
620 * Write the new values for CCSRBARH and CCSRBARL to their old
621 * locations. The CCSRBARH has a shadow register. When the CCSRBARH
622 * has a new value written it loads a CCSRBARH shadow register. When
623 * the CCSRBARL is written, the CCSRBARH shadow register contents
624 * along with the CCSRBARL value are loaded into the CCSRBARH and
625 * CCSRBARL registers, respectively. Follow this with a sync
626 * instruction.
627 */
628 write_new_ccsrbar:
629 lis r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
630 ori r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
631 lis r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
632 ori r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
633 lis r2, CCSRAR_C@h
634 ori r2, r2, CCSRAR_C@l
635
636 stw r0, 0(r9) /* Write to CCSRBARH */
637 sync /* Make sure we write to CCSRBARH first */
638 stw r1, 4(r9) /* Write to CCSRBARL */
639 sync
640
641 /*
642 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
643 * Follow this with a sync instruction.
644 */
645 stw r2, 8(r9)
646 sync
647
/* CCSR now answers at the NEW address, so the LAW registers are
 * reached through r8 (new mapping), not r9. */
648 /* Delete the temporary LAW */
649 delete_temp_law:
650 li r1, 0
651 stw r1, 0xc08(r8)
652 sync
653 stw r1, 0xc00(r8)
654 stw r1, 0xc04(r8)
655 sync
656
657 #else /* #ifdef CONFIG_FSL_CORENET */
658
659 write_new_ccsrbar:
660 /*
661 * Read the current value of CCSRBAR using a load word instruction
662 * followed by an isync. This forces all accesses to configuration
663 * space to complete.
664 */
665 sync
666 lwz r0, 0(r9)
667 isync
668
669 /* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
670 #define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
671 (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
672
673 /* Write the new value to CCSRBAR. */
674 lis r0, CCSRBAR_PHYS_RS12@h
675 ori r0, r0, CCSRBAR_PHYS_RS12@l
676 stw r0, 0(r9)
677 sync
678
679 /*
680 * The manual says to perform a load of an address that does not
681 * access configuration space or the on-chip SRAM using an existing TLB,
682 * but that doesn't appear to be necessary. We will do the isync,
683 * though.
684 */
685 isync
686
687 /*
688 * Read the contents of CCSRBAR from its new location, followed by
689 * another isync.
690 */
691 lwz r0, 0(r8)
692 isync
693
694 #endif /* #ifdef CONFIG_FSL_CORENET */
695
696 /* Delete the temporary TLBs */
697 delete_temp_tlbs:
698 delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
699 delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3
700
701 #endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
702
703 #if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
704 create_ccsr_l2_tlb:
705 /*
706 * Create a TLB for the MMR location of CCSR
707 * to access L2CSR0 register
708 */
709 create_tlb0_entry 0, \
710 0, BOOKE_PAGESZ_4K, \
711 CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
712 CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
713 CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
714
715 enable_l2_cluster_l2:
716 /* enable L2 cache */
717 lis r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
718 ori r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
719 li r4, 33 /* stash id */
720 stw r4, 4(r3)
721 lis r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
722 ori r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
723 sync
724 stw r4, 0(r3) /* invalidate L2 */
/* Poll L2CSR0 until the self-clearing invalidate/lock-flash bits drop.
 * The twi 0,r0,0 never traps (TO=0); with isync it forces the lwz to
 * complete before the loop test — the standard PowerPC I/O load-
 * ordering idiom. */
725 1: sync
726 lwz r0, 0(r3)
727 twi 0, r0, 0
728 isync
729 and. r1, r0, r4
730 bne 1b
731 lis r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
732 ori r4, r4, (L2CSR0_L2REP_MODE)@l
733 sync
734 stw r4, 0(r3) /* enable L2 */
735 delete_ccsr_l2_tlb:
736 delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
737 #endif
738
739 /*
740 * Enable the L1. On e6500, this has to be done
741 * after the L2 is up.
742 */
743
744 #ifdef CONFIG_SYS_CACHE_STASHING
745 /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
746 li r2,(32 + 0)
747 mtspr L1CSR2,r2
748 #endif
749
750 /* Enable/invalidate the I-Cache */
/* Write the flash-invalidate/lock-clear bits, then spin until the
 * hardware clears them (they are self-clearing). */
751 lis r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
752 ori r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
753 mtspr SPRN_L1CSR1,r2
754 1:
755 mfspr r3,SPRN_L1CSR1
756 and. r1,r3,r2
757 bne 1b
758
/* Now set the enable (and parity-enable) bits and wait for ICE to
 * read back as set. */
759 lis r3,(L1CSR1_CPE|L1CSR1_ICE)@h
760 ori r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
761 mtspr SPRN_L1CSR1,r3
762 isync
763 2:
764 mfspr r3,SPRN_L1CSR1
765 andi. r1,r3,L1CSR1_ICE@l
766 beq 2b
767
768 /* Enable/invalidate the D-Cache (same invalidate-then-enable dance) */
769 lis r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
770 ori r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
771 mtspr SPRN_L1CSR0,r2
772 1:
773 mfspr r3,SPRN_L1CSR0
774 and. r1,r3,r2
775 bne 1b
776
777 lis r3,(L1CSR0_CPE|L1CSR0_DCE)@h
778 ori r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
779 mtspr SPRN_L1CSR0,r3
780 isync
781 2:
782 mfspr r3,SPRN_L1CSR0
783 andi. r1,r3,L1CSR0_DCE@l
784 beq 2b
/*
 * Erratum A004510 workaround proper (only if r27 was set earlier).
 * Maps CCSR (via a fresh TLB1 entry) and DCSR (via a temporary LAW at
 * physical 0 plus a TLB1 entry), then performs a series of magic
 * register writes with erratum_set_value, which runs the store from a
 * locked I-cache line so no instruction fetch happens mid-sequence.
 * Register roles: r7 = CCSR VA, r6 = DCSR VA (0), r8/r9 = TLB1 ESELs
 * of the temporary CCSR/DCSR entries, r15 = saved MAS2 I/G bits,
 * r14 = ESEL of the boot TLB entry (set earlier).
 */
785 #ifdef CONFIG_SYS_FSL_ERRATUM_A004510
786 #define DCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000)
787 #define LAW_SIZE_1M 0x13
788 #define DCSRBAR_LAWAR (LAW_EN | (0x1d << 20) | LAW_SIZE_1M)
789
790 cmpwi r27,0
791 beq 9f
792
793 /*
794 * Create a TLB entry for CCSR
795 *
796 * We're executing out of TLB1 entry in r14, and that's the only
797 * TLB entry that exists. To allocate some TLB entries for our
798 * own use, flip a bit high enough that we won't flip it again
799 * via incrementing.
800 */
801
802 xori r8, r14, 32
803 lis r0, MAS0_TLBSEL(1)@h
804 rlwimi r0, r8, 16, MAS0_ESEL_MSK
805 lis r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
806 ori r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
807 lis r7, CONFIG_SYS_CCSRBAR@h
808 ori r7, r7, CONFIG_SYS_CCSRBAR@l
809 ori r2, r7, MAS2_I|MAS2_G
810 lis r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
811 ori r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
812 lis r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
813 ori r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
814 mtspr MAS0, r0
815 mtspr MAS1, r1
816 mtspr MAS2, r2
817 mtspr MAS3, r3
818 mtspr MAS7, r4
819 isync
820 tlbwe
821 isync
822 msync
823
824 /* Map DCSR temporarily to physical address zero */
825 li r0, 0
826 lis r3, DCSRBAR_LAWAR@h
827 ori r3, r3, DCSRBAR_LAWAR@l
828
829 stw r0, 0xc00(r7) /* LAWBARH0 */
830 stw r0, 0xc04(r7) /* LAWBARL0 */
831 sync
832 stw r3, 0xc08(r7) /* LAWAR0 */
833
834 /* Read back from LAWAR to ensure the update is complete. */
835 lwz r3, 0xc08(r7) /* LAWAR0 */
836 isync
837
838 /* Create a TLB entry for DCSR at zero */
839
840 addi r9, r8, 1
841 lis r0, MAS0_TLBSEL(1)@h
842 rlwimi r0, r9, 16, MAS0_ESEL_MSK
843 lis r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
844 ori r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
845 li r6, 0 /* DCSR effective address */
846 ori r2, r6, MAS2_I|MAS2_G
847 li r3, MAS3_SW|MAS3_SR
848 li r4, 0
849 mtspr MAS0, r0
850 mtspr MAS1, r1
851 mtspr MAS2, r2
852 mtspr MAS3, r3
853 mtspr MAS7, r4
854 isync
855 tlbwe
856 isync
857 msync
858
859 /* enable the timebase */
860 #define CTBENR 0xe2084
861 li r3, 1
862 addis r4, r7, CTBENR@ha
863 stw r3, CTBENR@l(r4)
/* lwz + twi + isync: force the store's read-back to complete before
 * continuing (twi with TO=0 never traps). */
864 lwz r3, CTBENR@l(r4)
865 twi 0,r3,0
866 isync
867
/* Helper macros: compute target address into r3 and value into r4,
 * then call erratum_set_value (which clobbers r5, r11, r12). */
868 .macro erratum_set_ccsr offset value
869 addis r3, r7, \offset@ha
870 lis r4, \value@h
871 addi r3, r3, \offset@l
872 ori r4, r4, \value@l
873 bl erratum_set_value
874 .endm
875
876 .macro erratum_set_dcsr offset value
877 addis r3, r6, \offset@ha
878 lis r4, \value@h
879 addi r3, r3, \offset@l
880 ori r4, r4, \value@l
881 bl erratum_set_value
882 .endm
883
/* NOTE(review): offsets/values below are taken verbatim from the
 * A004510 erratum workaround; not independently decodable here. */
884 erratum_set_dcsr 0xb0e08 0xe0201800
885 erratum_set_dcsr 0xb0e18 0xe0201800
886 erratum_set_dcsr 0xb0e38 0xe0400000
887 erratum_set_dcsr 0xb0008 0x00900000
888 erratum_set_dcsr 0xb0e40 0xe00a0000
889 erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
890 #ifdef CONFIG_RAMBOOT_PBL
891 erratum_set_ccsr 0x10f00 0x495e5000
892 #else
893 erratum_set_ccsr 0x10f00 0x415e5000
894 #endif
895 erratum_set_ccsr 0x11f00 0x415e5000
896
897 /* Make temp mapping uncacheable again, if it was initially */
/* r15 holds the I/G bits saved before the workaround; write them
 * back into the boot TLB entry found via tlbsx on our own PC. */
898 bl 2f
899 2: mflr r3
900 tlbsx 0, r3
901 mfspr r4, MAS2
902 rlwimi r4, r15, 0, MAS2_I
903 rlwimi r4, r15, 0, MAS2_G
904 mtspr MAS2, r4
905 isync
906 tlbwe
907 isync
908 msync
909
910 /* Clear the cache */
911 lis r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
912 ori r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
913 sync
914 isync
915 mtspr SPRN_L1CSR1,r3
916 isync
917 2: sync
918 mfspr r4,SPRN_L1CSR1
919 and. r4,r4,r3
920 bne 2b
921
922 lis r3,(L1CSR1_CPE|L1CSR1_ICE)@h
923 ori r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
924 sync
925 isync
926 mtspr SPRN_L1CSR1,r3
927 isync
928 2: sync
929 mfspr r4,SPRN_L1CSR1
930 and. r4,r4,r3
931 beq 2b
932
933 /* Remove temporary mappings */
/* First the DCSR TLB1 entry (ESEL in r9) ... */
934 lis r0, MAS0_TLBSEL(1)@h
935 rlwimi r0, r9, 16, MAS0_ESEL_MSK
936 li r3, 0
937 mtspr MAS0, r0
938 mtspr MAS1, r3
939 isync
940 tlbwe
941 isync
942 msync
943
/* ... then the temporary DCSR LAW ... */
944 li r3, 0
945 stw r3, 0xc08(r7) /* LAWAR0 */
946 lwz r3, 0xc08(r7)
947 isync
948
/* ... and finally the CCSR TLB1 entry (ESEL in r8). */
949 lis r0, MAS0_TLBSEL(1)@h
950 rlwimi r0, r8, 16, MAS0_ESEL_MSK
951 li r3, 0
952 mtspr MAS0, r0
953 mtspr MAS1, r3
954 isync
955 tlbwe
956 isync
957 msync
958
959 b 9f
960
961 /* r3 = addr, r4 = value, clobbers r5, r11, r12 */
962 erratum_set_value:
963 /* Lock two cache lines into I-Cache */
964 sync
965 mfspr r11, SPRN_L1CSR1
966 rlwinm r11, r11, 0, ~L1CSR1_ICUL
967 sync
968 isync
969 mtspr SPRN_L1CSR1, r11
970 isync
971
/* Lock the two cache lines starting at label 2 below (icbtls =
 * instruction cache block touch and lock set). */
972 mflr r12
973 bl 5f
974 5: mflr r5
975 addi r5, r5, 2f - 5b
976 icbtls 0, 0, r5
977 addi r5, r5, 64
978
/* Wait for the unable-to-lock flag to clear before locking the
 * second line. */
979 sync
980 mfspr r11, SPRN_L1CSR1
981 3: andi. r11, r11, L1CSR1_ICUL
982 bne 3b
983
984 icbtls 0, 0, r5
985 addi r5, r5, 64
986
987 sync
988 mfspr r11, SPRN_L1CSR1
989 3: andi. r11, r11, L1CSR1_ICUL
990 bne 3b
991
992 b 2f
993 .align 6
994 /* Inside a locked cacheline, wait a while, write, then wait a while */
995 2: sync
996
997 mfspr r5, SPRN_TBRL
998 addis r11, r5, 0x10000@h /* wait 65536 timebase ticks */
999 4: mfspr r5, SPRN_TBRL
1000 subf. r5, r5, r11
1001 bgt 4b
1002
1003 stw r4, 0(r3)
1004
1005 mfspr r5, SPRN_TBRL
1006 addis r11, r5, 0x10000@h /* wait 65536 timebase ticks */
1007 4: mfspr r5, SPRN_TBRL
1008 subf. r5, r5, r11
1009 bgt 4b
1010
1011 sync
1012
1013 /*
1014 * Fill out the rest of this cache line and the next with nops,
1015 * to ensure that nothing outside the locked area will be
1016 * fetched due to a branch.
1017 */
1018 .rept 19
1019 nop
1020 .endr
1021
/* Unlock the I-cache lines again (clear ICUL via the same rlwinm
 * mask used above) and return to the caller saved in r12. */
1022 sync
1023 mfspr r11, SPRN_L1CSR1
1024 rlwinm r11, r11, 0, ~L1CSR1_ICUL
1025 sync
1026 isync
1027 mtspr SPRN_L1CSR1, r11
1028 isync
1029
1030 mtlr r12
1031 blr
1032
1033 9:
1034 #endif
1035
/*
 * Build the AS=1 world: a mapping for the monitor image and one for
 * the initial-RAM stack, then rfi into AS=1 (switch_as), set up the
 * L1 D-cache as initial RAM, and jump to _start_cont.
 */
1036 create_init_ram_area:
1037 lis r6,FSL_BOOKE_MAS0(1, 15, 0)@h
1038 ori r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
1039
1040 #ifdef NOR_BOOT
1041 /* create a temp mapping in AS=1 to the 4M boot window */
1042 create_tlb1_entry 15, \
1043 1, BOOKE_PAGESZ_4M, \
1044 CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
1045 0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1046 0, r6
1047
1048 #elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
1049 /* create a temp mapping in AS = 1 for Flash mapping
1050 * created by PBL for ISBC code
1051 */
1052 create_tlb1_entry 15, \
1053 1, BOOKE_PAGESZ_1M, \
1054 CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1055 CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1056 0, r6
1057 #else
1058 /*
1059 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space, the main
1060 * image has been relocated to CONFIG_SYS_MONITOR_BASE on the second stage.
1061 */
1062 create_tlb1_entry 15, \
1063 1, BOOKE_PAGESZ_1M, \
1064 CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1065 CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1066 0, r6
1067 #endif
1068
1069 /* create a temp mapping in AS=1 to the stack */
1070 #if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
1071 defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
1072 create_tlb1_entry 14, \
1073 1, BOOKE_PAGESZ_16K, \
1074 CONFIG_SYS_INIT_RAM_ADDR, 0, \
1075 CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
1076 CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6
1077
1078 #else
1079 create_tlb1_entry 14, \
1080 1, BOOKE_PAGESZ_16K, \
1081 CONFIG_SYS_INIT_RAM_ADDR, 0, \
1082 CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
1083 0, r6
1084 #endif
1085
/* Switch to AS=1 via rfi: SRR1 = target MSR (IS|DS|DE), SRR0 = target
 * PC. NOTE(review): the @h/@l here bind only to MSR_DE by operator
 * precedence, so lis also carries MSR_IS|MSR_DS un-shifted into the
 * upper half; those extra high bits appear harmless but this matches
 * the historical upstream source — confirm against the MSR layout
 * before "fixing". */
1086 lis r6,MSR_IS|MSR_DS|MSR_DE@h
1087 ori r6,r6,MSR_IS|MSR_DS|MSR_DE@l
1088 lis r7,switch_as@h
1089 ori r7,r7,switch_as@l
1090
1091 mtspr SPRN_SRR0,r7
1092 mtspr SPRN_SRR1,r6
1093 rfi
1094
1095 switch_as:
1096 /* L1 DCache is used for initial RAM */
1097
1098 /* Allocate Initial RAM in data cache.
1099 */
1100 lis r3,CONFIG_SYS_INIT_RAM_ADDR@h
1101 ori r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
/* L1CFG0[CSIZE] gives the cache size in KB; loop count =
 * size*1024 / (2 * line size) — only half the D-cache is locked. */
1102 mfspr r2, L1CFG0
1103 andi. r2, r2, 0x1ff
1104 /* cache size * 1024 / (2 * L1 line size) */
1105 slwi r2, r2, (10 - 1 - L1_CACHE_SHIFT)
1106 mtctr r2
1107 li r0,0
1108 1:
/* dcbz establishes the line without a memory read; dcbtls locks it
 * so it cannot be evicted — this is the "L1 as initial RAM" trick. */
1109 dcbz r0,r3
1110 dcbtls 0,r0,r3
1111 addi r3,r3,CONFIG_SYS_CACHELINE_SIZE
1112 bdnz 1b
1113
1114 /* Jump out the last 4K page and continue to 'normal' start */
1115 #if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
1116 /* We assume that we're already running at the address we're linked at */
1117 b _start_cont
1118 #else
1119 /* Calculate absolute address in FLASH and jump there */
1120 /*--------------------------------------------------------------*/
1121 lis r3,CONFIG_SYS_MONITOR_BASE@h
1122 ori r3,r3,CONFIG_SYS_MONITOR_BASE@l
1123 addi r3,r3,_start_cont - _start + _START_OFFSET
1124 mtlr r3
1125 blr
1126 #endif
1127
1128 .text
1129 .globl _start
1130 _start:
1131 .long 0x27051956 /* U-BOOT Magic Number */
1132 .globl version_string
1133 version_string:
1134 .ascii U_BOOT_VERSION_STRING, "\0"
1135
1136 .align 4
1137 .globl _start_cont
1138 _start_cont:
1139 /* Setup the stack in initial RAM,could be L2-as-SRAM or L1 dcache*/
1140 lis r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
1141 ori r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
1142 li r0,0
/* Zeroed back-chain word and return address terminate the PowerPC
 * stack-frame chain so backtraces stop cleanly here. */
1143 stw r0,0(r3) /* Terminate Back Chain */
1144 stw r0,+4(r3) /* NULL return address. */
1145 mr r1,r3 /* Transfer to SP(r1) */
1146
1147 GET_GOT
1148
1149 /* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
1150 mr r3, r24
1151
1152 bl cpu_init_early_f
1153
1154 /* switch back to AS = 0 */
1155 lis r3,(MSR_CE|MSR_ME|MSR_DE)@h
1156 ori r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
1157 mtmsr r3
1158 isync
1159
1160 bl cpu_init_f
1161 bl board_init_f
1162 isync
1163
1164 /* NOTREACHED - board_init_f() does not return */
1165
1166 #ifndef MINIMAL_SPL
/*
 * Exception vector table. Each vector sits at its architectural fixed
 * offset (the ". = 0xNNNN" directives) relative to IVPR; trap_init()
 * later copies this whole table to low memory and programs the IVORs.
 */
1167 . = EXC_OFF_SYS_RESET
1168 .globl _start_of_vectors
1169 _start_of_vectors:
1170
1171 /* Critical input. */
/* NOTE(review): "CritcalInputException" is misspelled but must match the
 * handler symbol defined in the traps code — do not "fix" it here alone. */
1172 CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
1173
1174 /* Machine check */
1175 MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
1176
1177 /* Data Storage exception. */
1178 STD_EXCEPTION(0x0300, DataStorage, UnknownException)
1179
1180 /* Instruction Storage exception. */
1181 STD_EXCEPTION(0x0400, InstStorage, UnknownException)
1182
1183 /* External Interrupt exception. */
1184 STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)
1185
1186 /* Alignment exception. */
1187 . = 0x0600
1188 Alignment:
1189 EXCEPTION_PROLOG(SRR0, SRR1)
/* Save fault address / cause so the C handler can report them */
1190 mfspr r4,DAR
1191 stw r4,_DAR(r21)
1192 mfspr r5,DSISR
1193 stw r5,_DSISR(r21)
1194 addi r3,r1,STACK_FRAME_OVERHEAD
1195 EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)
1196
1197 /* Program check exception */
1198 . = 0x0700
1199 ProgramCheck:
1200 EXCEPTION_PROLOG(SRR0, SRR1)
1201 addi r3,r1,STACK_FRAME_OVERHEAD
1202 EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
1203 MSR_KERNEL, COPY_EE)
1204
1205 /* No FPU on MPC85xx. This exception is not supposed to happen.
1206 */
1207 STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
1208
1209 . = 0x0900
1210 /*
1211 * r0 - SYSCALL number
1212 * r3-... arguments
1213 */
1214 SystemCall:
/* The four immediates below start as zero; trap_init() patches in the
 * function table address and entry count after relocation. */
1215 addis r11,r0,0 /* get functions table addr */
1216 ori r11,r11,0 /* Note: this code is patched in trap_init */
1217 addis r12,r0,0 /* get number of functions */
1218 ori r12,r12,0
1219
1220 cmplw 0,r0,r12
1221 bge 1f
1222
1223 rlwinm r0,r0,2,0,31 /* fn_addr = fn_tbl[r0] */
1224 add r11,r11,r0
1225 lwz r11,0(r11)
1226
/* A tiny private stack pointer lives in the word at 0xd00-4; frames of
 * 12 bytes hold LR/SRR0/SRR1 across the dispatched call. */
1227 li r20,0xd00-4 /* Get stack pointer */
1228 lwz r12,0(r20)
1229 subi r12,r12,12 /* Adjust stack pointer */
1230 li r0,0xc00+_end_back-SystemCall
1231 cmplw 0,r0,r12 /* Check stack overflow */
1232 bgt 1f
1233 stw r12,0(r20)
1234
1235 mflr r0
1236 stw r0,0(r12)
1237 mfspr r0,SRR0
1238 stw r0,4(r12)
1239 mfspr r0,SRR1
1240 stw r0,8(r12)
1241
/* Return into _back (at its copied-to-low-memory address 0xc00+offset)
 * after the target function finishes; rfi enters the function itself. */
1242 li r12,0xc00+_back-SystemCall
1243 mtlr r12
1244 mtspr SRR0,r11
1245
1246 1: SYNC
1247 rfi
1248 _back:
1249
1250 mfmsr r11 /* Disable interrupts */
1251 li r12,0
1252 ori r12,r12,MSR_EE
1253 andc r11,r11,r12
1254 SYNC /* Some chip revs need this... */
1255 mtmsr r11
1256 SYNC
1257
1258 li r12,0xd00-4 /* restore regs */
1259 lwz r12,0(r12)
1260
1261 lwz r11,0(r12)
1262 mtlr r11
1263 lwz r11,4(r12)
1264 mtspr SRR0,r11
1265 lwz r11,8(r12)
1266 mtspr SRR1,r11
1267
1268 addi r12,r12,12 /* Adjust stack pointer */
1269 li r20,0xd00-4
1270 stw r12,0(r20)
1271
1272 SYNC
1273 rfi
1274 _end_back:
1275
1276 STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
1277 STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
1278 STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
1279
1280 STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
1281 STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
1282
1283 CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )
1284
1285 .globl _end_of_vectors
1286 _end_of_vectors:
1287
1288
1289 . = . + (0x100 - ( . & 0xff )) /* align for debug */
1290
1291 /*
1292 * This code finishes saving the registers to the exception frame
1293 * and jumps to the appropriate handler for the exception.
1294 * Register r21 is pointer into trap frame, r1 has new stack pointer.
1295 */
1296 .globl transfer_to_handler
1297 transfer_to_handler:
/* r22 = saved NIP, r23 = saved MSR (set up by EXCEPTION_PROLOG).
 * Clear MSR_POW so the resumed context cannot re-enter power-save. */
1298 stw r22,_NIP(r21)
1299 lis r22,MSR_POW@h
1300 andc r23,r23,r22
1301 stw r23,_MSR(r21)
1302 SAVE_GPR(7, r21)
1303 SAVE_4GPRS(8, r21)
1304 SAVE_8GPRS(12, r21)
1305 SAVE_8GPRS(24, r21)
1306
/* LR points just past the EXC_XFER_TEMPLATE call site; its low bits
 * encode the vector offset, and the two words at LR hold the handler
 * address and the return ("where to go when done") address. */
1307 mflr r23
1308 andi. r24,r23,0x3f00 /* get vector offset */
1309 stw r24,TRAP(r21)
1310 li r22,0
1311 stw r22,RESULT(r21)
1312 mtspr SPRG2,r22 /* r1 is now kernel sp */
1313
1314 lwz r24,0(r23) /* virtual address of handler */
1315 lwz r23,4(r23) /* where to go when done */
1316 mtspr SRR0,r24
1317 mtspr SRR1,r20
1318 mtlr r23
1319 SYNC
1320 rfi /* jump to handler, enable MMU */
1321
/*
 * int_return: restore the full register context from the exception
 * frame at r1 and resume via SRR0/SRR1 + rfi (standard interrupts).
 * Interrupts are masked first so the frame cannot be re-entered.
 */
1322 int_return:
1323 mfmsr r28 /* Disable interrupts */
1324 li r4,0
1325 ori r4,r4,MSR_EE
1326 andc r28,r28,r4
1327 SYNC /* Some chip revs need this... */
1328 mtmsr r28
1329 SYNC
1330 lwz r2,_CTR(r1)
1331 lwz r0,_LINK(r1)
1332 mtctr r2
1333 mtlr r0
1334 lwz r2,_XER(r1)
1335 lwz r0,_CCR(r1)
1336 mtspr XER,r2
1337 mtcrf 0xFF,r0
1338 REST_10GPRS(3, r1)
1339 REST_10GPRS(13, r1)
1340 REST_8GPRS(23, r1)
1341 REST_GPR(31, r1)
1342 lwz r2,_NIP(r1) /* Restore environment */
1343 lwz r0,_MSR(r1)
1344 mtspr SRR0,r2
1345 mtspr SRR1,r0
/* r0/r2/r1 restored last — r1 must survive until all frame loads done */
1346 lwz r0,GPR0(r1)
1347 lwz r2,GPR2(r1)
1348 lwz r1,GPR1(r1)
1349 SYNC
1350 rfi
1351
/*
 * crit_return: same restore sequence as int_return, but resumes a
 * critical-class exception via CSRR0/CSRR1 + rfci.
 */
1352 crit_return:
1353 mfmsr r28 /* Disable interrupts */
1354 li r4,0
1355 ori r4,r4,MSR_EE
1356 andc r28,r28,r4
1357 SYNC /* Some chip revs need this... */
1358 mtmsr r28
1359 SYNC
1360 lwz r2,_CTR(r1)
1361 lwz r0,_LINK(r1)
1362 mtctr r2
1363 mtlr r0
1364 lwz r2,_XER(r1)
1365 lwz r0,_CCR(r1)
1366 mtspr XER,r2
1367 mtcrf 0xFF,r0
1368 REST_10GPRS(3, r1)
1369 REST_10GPRS(13, r1)
1370 REST_8GPRS(23, r1)
1371 REST_GPR(31, r1)
1372 lwz r2,_NIP(r1) /* Restore environment */
1373 lwz r0,_MSR(r1)
1374 mtspr SPRN_CSRR0,r2
1375 mtspr SPRN_CSRR1,r0
1376 lwz r0,GPR0(r1)
1377 lwz r2,GPR2(r1)
1378 lwz r1,GPR1(r1)
1379 SYNC
1380 rfci
1381
/*
 * mck_return: same restore sequence as int_return, but resumes a
 * machine-check exception via MCSRR0/MCSRR1 + rfmci.
 */
1382 mck_return:
1383 mfmsr r28 /* Disable interrupts */
1384 li r4,0
1385 ori r4,r4,MSR_EE
1386 andc r28,r28,r4
1387 SYNC /* Some chip revs need this... */
1388 mtmsr r28
1389 SYNC
1390 lwz r2,_CTR(r1)
1391 lwz r0,_LINK(r1)
1392 mtctr r2
1393 mtlr r0
1394 lwz r2,_XER(r1)
1395 lwz r0,_CCR(r1)
1396 mtspr XER,r2
1397 mtcrf 0xFF,r0
1398 REST_10GPRS(3, r1)
1399 REST_10GPRS(13, r1)
1400 REST_8GPRS(23, r1)
1401 REST_GPR(31, r1)
1402 lwz r2,_NIP(r1) /* Restore environment */
1403 lwz r0,_MSR(r1)
1404 mtspr SPRN_MCSRR0,r2
1405 mtspr SPRN_MCSRR1,r0
1406 lwz r0,GPR0(r1)
1407 lwz r2,GPR2(r1)
1408 lwz r1,GPR1(r1)
1409 SYNC
1410 rfmci
1411
1412 /* Cache functions.
1413 */
/*
 * flush_icache / invalidate_icache: set L1CSR1[ICFI] to flash-invalidate
 * the whole L1 instruction cache (same operation; I-cache has nothing
 * dirty to flush). Clobbers r0.
 */
1414 .globl flush_icache
1415 flush_icache:
1416 .globl invalidate_icache
1417 invalidate_icache:
1418 mfspr r0,L1CSR1
1419 ori r0,r0,L1CSR1_ICFI
1420 msync
1421 isync
1422 mtspr L1CSR1,r0
1423 isync
1424 blr /* entire I cache */
1425
/*
 * invalidate_dcache: set L1CSR0[DCFI] to flash-invalidate the whole L1
 * data cache (dirty lines are discarded, not written back). Clobbers r0.
 */
1426 .globl invalidate_dcache
1427 invalidate_dcache:
1428 mfspr r0,L1CSR0
1429 ori r0,r0,L1CSR0_DCFI
1430 msync
1431 isync
1432 mtspr L1CSR0,r0
1433 isync
1434 blr
1435
/*
 * icache_enable: invalidate then enable the L1 I-cache.
 * Clobbers r0, r4, r8.
 */
1436 .globl icache_enable
1437 icache_enable:
1438 mflr r8
1439 bl invalidate_icache
1440 mtlr r8
1441 isync
1442 mfspr r4,L1CSR1
/* NOTE(review): sets bit 0x0001 in both halves of L1CSR1 — presumably
 * ICE plus a parity/error-enable bit; confirm against the e500 manual. */
1443 ori r4,r4,0x0001
1444 oris r4,r4,0x0001
1445 mtspr L1CSR1,r4
1446 isync
1447 blr
1448
/*
 * icache_disable: clear L1CSR1[ICE] to turn the L1 I-cache off.
 * Clobbers r0, r3.
 */
1449 .globl icache_disable
1450 icache_disable:
1451 mfspr r0,L1CSR1
1452 lis r3,0
1453 ori r3,r3,L1CSR1_ICE
1454 andc r0,r0,r3
1455 mtspr L1CSR1,r0
1456 isync
1457 blr
1458
/*
 * icache_status: return L1CSR1[ICE] in r3 (non-zero = enabled).
 */
1459 .globl icache_status
1460 icache_status:
1461 mfspr r3,L1CSR1
1462 andi. r3,r3,L1CSR1_ICE
1463 blr
1464
/*
 * dcache_enable: invalidate then enable the L1 D-cache.
 * Clobbers r0, r8.
 */
1465 .globl dcache_enable
1466 dcache_enable:
1467 mflr r8
1468 bl invalidate_dcache
1469 mtlr r8
1470 isync
1471 mfspr r0,L1CSR0
/* NOTE(review): sets bit 0x0001 in both halves of L1CSR0 — presumably
 * DCE plus a parity/error-enable bit; confirm against the e500 manual. */
1472 ori r0,r0,0x0001
1473 oris r0,r0,0x0001
1474 msync
1475 isync
1476 mtspr L1CSR0,r0
1477 isync
1478 blr
1479
/*
 * dcache_disable: clear L1CSR0[DCE] to turn the L1 D-cache off.
 * Clobbers r3, r4.
 */
1480 .globl dcache_disable
1481 dcache_disable:
1482 mfspr r3,L1CSR0
1483 lis r4,0
1484 ori r4,r4,L1CSR0_DCE
1485 andc r3,r3,r4
1486 mtspr L1CSR0,r3
1487 isync
1488 blr
1489
/*
 * dcache_status: return L1CSR0[DCE] in r3 (non-zero = enabled).
 */
1490 .globl dcache_status
1491 dcache_status:
1492 mfspr r3,L1CSR0
1493 andi. r3,r3,L1CSR0_DCE
1494 blr
1495
/*
 * Trivial SPR accessors:
 *   get_pir - return Processor ID Register in r3
 *   get_pvr - return Processor Version Register in r3
 *   get_svr - return System Version Register in r3
 *   wr_tcr  - write r3 to the Timer Control Register
 */
1496 .globl get_pir
1497 get_pir:
1498 mfspr r3,PIR
1499 blr
1500
1501 .globl get_pvr
1502 get_pvr:
1503 mfspr r3,PVR
1504 blr
1505
1506 .globl get_svr
1507 get_svr:
1508 mfspr r3,SVR
1509 blr
1510
1511 .globl wr_tcr
1512 wr_tcr:
1513 mtspr TCR,r3
1514 blr
1515
/*
 * Memory-mapped I/O accessors. Convention throughout:
 *   r3 = address (in), r4 = value (out* input), result returned in r3.
 * Stores are followed by sync so the access is ordered on the bus;
 * the *r variants byte-reverse for little-endian device registers.
 */
1516 /*------------------------------------------------------------------------------- */
1517 /* Function: in8 */
1518 /* Description: Input 8 bits */
1519 /*------------------------------------------------------------------------------- */
1520 .globl in8
1521 in8:
1522 lbz r3,0x0000(r3)
1523 blr
1524
1525 /*------------------------------------------------------------------------------- */
1526 /* Function: out8 */
1527 /* Description: Output 8 bits */
1528 /*------------------------------------------------------------------------------- */
1529 .globl out8
1530 out8:
1531 stb r4,0x0000(r3)
1532 sync
1533 blr
1534
1535 /*------------------------------------------------------------------------------- */
1536 /* Function: out16 */
1537 /* Description: Output 16 bits */
1538 /*------------------------------------------------------------------------------- */
1539 .globl out16
1540 out16:
1541 sth r4,0x0000(r3)
1542 sync
1543 blr
1544
1545 /*------------------------------------------------------------------------------- */
1546 /* Function: out16r */
1547 /* Description: Byte reverse and output 16 bits */
1548 /*------------------------------------------------------------------------------- */
1549 .globl out16r
1550 out16r:
1551 sthbrx r4,r0,r3
1552 sync
1553 blr
1554
1555 /*------------------------------------------------------------------------------- */
1556 /* Function: out32 */
1557 /* Description: Output 32 bits */
1558 /*------------------------------------------------------------------------------- */
1559 .globl out32
1560 out32:
1561 stw r4,0x0000(r3)
1562 sync
1563 blr
1564
1565 /*------------------------------------------------------------------------------- */
1566 /* Function: out32r */
1567 /* Description: Byte reverse and output 32 bits */
1568 /*------------------------------------------------------------------------------- */
1569 .globl out32r
1570 out32r:
1571 stwbrx r4,r0,r3
1572 sync
1573 blr
1574
1575 /*------------------------------------------------------------------------------- */
1576 /* Function: in16 */
1577 /* Description: Input 16 bits */
1578 /*------------------------------------------------------------------------------- */
1579 .globl in16
1580 in16:
1581 lhz r3,0x0000(r3)
1582 blr
1583
1584 /*------------------------------------------------------------------------------- */
1585 /* Function: in16r */
1586 /* Description: Input 16 bits and byte reverse */
1587 /*------------------------------------------------------------------------------- */
1588 .globl in16r
1589 in16r:
1590 lhbrx r3,r0,r3
1591 blr
1592
1593 /*------------------------------------------------------------------------------- */
1594 /* Function: in32 */
1595 /* Description: Input 32 bits */
1596 /* In: r3 = address; Out: r3 = 32-bit value read */
1597 /*------------------------------------------------------------------------------- */
1597 .globl in32
1598 in32:
/* Use symbolic register names (rN) like every other accessor in this
 * file; "lwz 3,0x0000(3)" assembled identically but was inconsistent. */
1599 lwz r3,0x0000(r3)
1600 blr
1601
1601
1602 /*------------------------------------------------------------------------------- */
1603 /* Function: in32r */
1604 /* Description: Input 32 bits and byte reverse */
/* In: r3 = address; Out: r3 = byte-reversed 32-bit value */
1605 /*------------------------------------------------------------------------------- */
1606 .globl in32r
1607 in32r:
1608 lwbrx r3,r0,r3
1609 blr
1610 #endif /* !MINIMAL_SPL */
1611
1612 /*------------------------------------------------------------------------------*/
1613
1614 /*
1615 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
 *
 * Load the MAS registers from r3..r7 and commit the entry with tlbwe.
 * MAS8 is cleared first under the hypervisor config so the entry is not
 * assigned to a guest. Clobbers r3 (set to 0 before return).
1616 */
1617 .globl write_tlb
1618 write_tlb:
1619 mtspr MAS0,r3
1620 mtspr MAS1,r4
1621 mtspr MAS2,r5
1622 mtspr MAS3,r6
1623 #ifdef CONFIG_ENABLE_36BIT_PHYS
1624 mtspr MAS7,r7
1625 #endif
1626 li r3,0
1627 #ifdef CONFIG_SYS_BOOK3E_HV
1628 mtspr MAS8,r3
1629 #endif
1630 isync
1631 tlbwe
1632 msync
1633 isync
1634 blr
1635
1636 /*
1637 * void relocate_code (addr_sp, gd, addr_moni)
1638 *
1639 * This "function" does not return, instead it continues in RAM
1640 * after relocating the monitor code.
1641 *
1642 * r3 = dest
1643 * r4 = src
1644 * r5 = length in bytes
1645 * r6 = cachelinesize
1646 */
1647 .globl relocate_code
1648 relocate_code:
/* On entry (C args): r3 = new stack, r4 = gd pointer, r5 = destination */
1649 mr r1,r3 /* Set new stack pointer */
1650 mr r9,r4 /* Save copy of Init Data pointer */
1651 mr r10,r5 /* Save copy of Destination Address */
1652
1653 GET_GOT
1654 mr r3,r5 /* Destination Address */
1655 lis r4,CONFIG_SYS_MONITOR_BASE@h /* Source Address */
1656 ori r4,r4,CONFIG_SYS_MONITOR_BASE@l
1657 lwz r5,GOT(__init_end)
1658 sub r5,r5,r4
1659 li r6,CONFIG_SYS_CACHELINE_SIZE /* Cache Line Size */
1660
1661 /*
1662 * Fix GOT pointer:
1663 *
1664 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
1665 *
1666 * Offset:
1667 */
1668 sub r15,r10,r4
1669
1670 /* First our own GOT */
1671 add r12,r12,r15
1672 /* then the one used by the C code */
1673 add r30,r30,r15
1674
1675 /*
1676 * Now relocate code
1677 */
1678
/* Copy forward when dest < src, backward when dest > src, so the copy
 * is safe even if the regions overlap. */
1679 cmplw cr1,r3,r4
1680 addi r0,r5,3
1681 srwi. r0,r0,2
1682 beq cr1,4f /* In place copy is not necessary */
1683 beq 7f /* Protect against 0 count */
1684 mtctr r0
1685 bge cr1,2f
1686
1687 la r8,-4(r4)
1688 la r7,-4(r3)
1689 1: lwzu r0,4(r8)
1690 stwu r0,4(r7)
1691 bdnz 1b
1692 b 4f
1693
1694 2: slwi r0,r0,2
1695 add r8,r4,r0
1696 add r7,r3,r0
1697 3: lwzu r0,-4(r8)
1698 stwu r0,-4(r7)
1699 bdnz 3b
1700
1701 /*
1702 * Now flush the cache: note that we must start from a cache aligned
1703 * address. Otherwise we might miss one cache line.
1704 */
1705 4: cmpwi r6,0
1706 add r5,r3,r5
1707 beq 7f /* Always flush prefetch queue in any case */
1708 subi r0,r6,1
1709 andc r3,r3,r0
1710 mr r4,r3
/* dcbst pushes copied code to memory, then icbi invalidates the stale
 * I-cache lines so the relocated code is what actually executes. */
1711 5: dcbst 0,r4
1712 add r4,r4,r6
1713 cmplw r4,r5
1714 blt 5b
1715 sync /* Wait for all dcbst to complete on bus */
1716 mr r4,r3
1717 6: icbi 0,r4
1718 add r4,r4,r6
1719 cmplw r4,r5
1720 blt 6b
1721 7: sync /* Wait for all icbi to complete on bus */
1722 isync
1723
1724 /*
1725 * We are done. Do not return, instead branch to second part of board
1726 * initialization, now running from RAM.
1727 */
1728
1729 addi r0,r10,in_ram - _start + _START_OFFSET
1730
1731 /*
1732 * As IVPR is going to point RAM address,
1733 * Make sure IVOR15 has valid opcode to support debugger
1734 */
1735 mtspr IVOR15,r0
1736
1737 /*
1738 * Re-point the IVPR at RAM
1739 */
1740 mtspr IVPR,r10
1741
1742 mtlr r0
1743 blr /* NEVER RETURNS! */
/*
 * in_ram: executed at the relocated address. Adjusts GOT2 entries and
 * the fixup table by the relocation offset (r11/r15), clears BSS, then
 * enters board_init_r(gd, dest_addr). Does not return.
 */
1744 .globl in_ram
1745 in_ram:
1746
1747 /*
1748 * Relocation Function, r12 point to got2+0x8000
1749 *
1750 * Adjust got2 pointers, no need to check for 0, this code
1751 * already puts a few entries in the table.
1752 */
1753 li r0,__got2_entries@sectoff@l
1754 la r3,GOT(_GOT2_TABLE_)
1755 lwz r11,GOT(_GOT2_TABLE_)
1756 mtctr r0
/* r11 = relocation delta (new table addr - old stored addr) */
1757 sub r11,r3,r11
1758 addi r3,r3,-4
1759 1: lwzu r0,4(r3)
1760 cmpwi r0,0
1761 beq- 2f
1762 add r0,r0,r11
1763 stw r0,0(r3)
1764 2: bdnz 1b
1765
1766 /*
1767 * Now adjust the fixups and the pointers to the fixups
1768 * in case we need to move ourselves again.
1769 */
1770 li r0,__fixup_entries@sectoff@l
1771 lwz r3,GOT(_FIXUP_TABLE_)
1772 cmpwi r0,0
1773 mtctr r0
1774 addi r3,r3,-4
1775 beq 4f
1776 3: lwzu r4,4(r3)
1777 lwzux r0,r4,r11
1778 cmpwi r0,0
1779 add r0,r0,r11
1780 stw r4,0(r3)
1781 beq- 5f
1782 stw r0,0(r4)
1783 5: bdnz 3b
1784 4:
1785 clear_bss:
1786 /*
1787 * Now clear BSS segment
1788 */
1789 lwz r3,GOT(__bss_start)
1790 lwz r4,GOT(__bss_end)
1791
1792 cmplw 0,r3,r4
1793 beq 6f
1794
1795 li r0,0
1796 5:
1797 stw r0,0(r3)
1798 addi r3,r3,4
1799 cmplw 0,r3,r4
1800 blt 5b
1801 6:
1802
1803 mr r3,r9 /* Init Data pointer */
1804 mr r4,r10 /* Destination Address */
1805 bl board_init_r
1806
1807 #ifndef MINIMAL_SPL
1808 /*
1809 * Copy exception vector code to low memory
1810 *
1811 * r3: dest_addr
1812 * r7: source address, r8: end address, r9: target address
1813 */
1814 .globl trap_init
1815 trap_init:
1816 mflr r4 /* save link register */
1817 GET_GOT
1818 lwz r7,GOT(_start_of_vectors)
1819 lwz r8,GOT(_end_of_vectors)
1820
1821 li r9,0x100 /* reset vector always at 0x100 */
1822
1823 cmplw 0,r7,r8
1824 bgelr /* return if r7>=r8 - just in case */
/* Word-copy the whole vector table to low memory starting at 0x100 */
1825 1:
1826 lwz r0,0(r7)
1827 stw r0,0(r9)
1828 addi r7,r7,4
1829 addi r9,r9,4
1830 cmplw 0,r7,r8
1831 bne 1b
1832
1833 /*
1834 * relocate `hdlr' and `int_return' entries
1835 */
/* trap_reloc (defined elsewhere) patches the handler/return address
 * pair embedded after each vector's transfer stub; r7 = vector offset */
1836 li r7,.L_CriticalInput - _start + _START_OFFSET
1837 bl trap_reloc
1838 li r7,.L_MachineCheck - _start + _START_OFFSET
1839 bl trap_reloc
1840 li r7,.L_DataStorage - _start + _START_OFFSET
1841 bl trap_reloc
1842 li r7,.L_InstStorage - _start + _START_OFFSET
1843 bl trap_reloc
1844 li r7,.L_ExtInterrupt - _start + _START_OFFSET
1845 bl trap_reloc
1846 li r7,.L_Alignment - _start + _START_OFFSET
1847 bl trap_reloc
1848 li r7,.L_ProgramCheck - _start + _START_OFFSET
1849 bl trap_reloc
1850 li r7,.L_FPUnavailable - _start + _START_OFFSET
1851 bl trap_reloc
1852 li r7,.L_Decrementer - _start + _START_OFFSET
1853 bl trap_reloc
/* Remaining vectors are evenly spaced 0x100 apart — loop over them */
1854 li r7,.L_IntervalTimer - _start + _START_OFFSET
1855 li r8,_end_of_vectors - _start + _START_OFFSET
1856 2:
1857 bl trap_reloc
1858 addi r7,r7,0x100 /* next exception vector */
1859 cmplw 0,r7,r8
1860 blt 2b
1861
1862 /* Update IVORs as per relocated vector table address */
1863 li r7,0x0100
1864 mtspr IVOR0,r7 /* 0: Critical input */
1865 li r7,0x0200
1866 mtspr IVOR1,r7 /* 1: Machine check */
1867 li r7,0x0300
1868 mtspr IVOR2,r7 /* 2: Data storage */
1869 li r7,0x0400
1870 mtspr IVOR3,r7 /* 3: Instruction storage */
1871 li r7,0x0500
1872 mtspr IVOR4,r7 /* 4: External interrupt */
1873 li r7,0x0600
1874 mtspr IVOR5,r7 /* 5: Alignment */
1875 li r7,0x0700
1876 mtspr IVOR6,r7 /* 6: Program check */
1877 li r7,0x0800
1878 mtspr IVOR7,r7 /* 7: floating point unavailable */
1879 li r7,0x0900
1880 mtspr IVOR8,r7 /* 8: System call */
1881 /* 9: Auxiliary processor unavailable(unsupported) */
1882 li r7,0x0a00
1883 mtspr IVOR10,r7 /* 10: Decrementer */
1884 li r7,0x0b00
1885 mtspr IVOR11,r7 /* 11: Interval timer */
1886 li r7,0x0c00
1887 mtspr IVOR12,r7 /* 12: Watchdog timer */
1888 li r7,0x0d00
1889 mtspr IVOR13,r7 /* 13: Data TLB error */
1890 li r7,0x0e00
1891 mtspr IVOR14,r7 /* 14: Instruction TLB error */
1892 li r7,0x0f00
1893 mtspr IVOR15,r7 /* 15: Debug */
1894
/* Vector base back to 0 — vectors now live in low memory */
1895 lis r7,0x0
1896 mtspr IVPR,r7
1897
1898 mtlr r4 /* restore link register */
1899 blr
1900
/*
 * unlock_ram_in_cache: tear down the cache-as-initial-RAM region set up
 * by switch_as — invalidate and unlock (dcbi/dcblc) every line of the
 * INIT_RAM area, then invalidate its TLB entries (four 4K pages = the
 * 16K init-RAM mapping). Clobbers r3, r4.
 */
1901 .globl unlock_ram_in_cache
1902 unlock_ram_in_cache:
1903 /* invalidate the INIT_RAM section */
1904 lis r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
1905 ori r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
1906 mfspr r4,L1CFG0
1907 andi. r4,r4,0x1ff
/* same line-count computation as the lock loop in switch_as */
1908 slwi r4,r4,(10 - 1 - L1_CACHE_SHIFT)
1909 mtctr r4
1910 1: dcbi r0,r3
1911 dcblc r0,r3
1912 addi r3,r3,CONFIG_SYS_CACHELINE_SIZE
1913 bdnz 1b
1914 sync
1915
1916 /* Invalidate the TLB entries for the cache */
1917 lis r3,CONFIG_SYS_INIT_RAM_ADDR@h
1918 ori r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1919 tlbivax 0,r3
1920 addi r3,r3,0x1000
1921 tlbivax 0,r3
1922 addi r3,r3,0x1000
1923 tlbivax 0,r3
1924 addi r3,r3,0x1000
1925 tlbivax 0,r3
1926 isync
1927 blr
1928
/*
 * flush_dcache: flush the entire L1 D-cache by loading a memory range
 * larger than the cache (with HID0[DCFA] set so allocation ignores the
 * WIMGE attributes), then dcbf-ing the same range.
 * Clobbers r3-r9, CTR.
 */
1929 .globl flush_dcache
1930 flush_dcache:
1931 mfspr r3,SPRN_L1CFG0
1932
1933 rlwinm r5,r3,9,3 /* Extract cache block size */
1934 twlgti r5,1 /* Only 32 and 64 byte cache blocks
1935 * are currently defined.
1936 */
1937 li r4,32
1938 subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) -
1939 * log2(number of ways)
1940 */
1941 slw r5,r4,r5 /* r5 = cache block size */
1942
1943 rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */
/* 13 loads per set is enough to evict all 8 ways plus margin */
1944 mulli r7,r7,13 /* An 8-way cache will require 13
1945 * loads per set.
1946 */
1947 slw r7,r7,r6 /* r7 = total number of loads to issue */
1948
1949 /* save off HID0 and set DCFA */
1950 mfspr r8,SPRN_HID0
1951 ori r9,r8,HID0_DCFA@l
1952 mtspr SPRN_HID0,r9
1953 isync
1954
/* Walk from address 0: loads displace every dirty line in the cache */
1955 lis r4,0
1956 mtctr r7
1957
1958 1: lwz r3,0(r4) /* Load... */
1959 add r4,r4,r5
1960 bdnz 1b
1961
1962 msync
1963 lis r4,0
1964 mtctr r7
1965
1966 1: dcbf 0,r4 /* ...and flush. */
1967 add r4,r4,r5
1968 bdnz 1b
1969
1970 /* restore HID0 */
1971 mtspr SPRN_HID0,r8
1972 isync
1973
1974 blr
1975 #endif /* !MINIMAL_SPL */