/*
 * Copyright 2007-2011 Freescale Semiconductor, Inc.
 *
 * (C) Copyright 2003 Motorola Inc.
 * Modified by Xianghua Xiao, X.Xiao@motorola.com
 *
 * (C) Copyright 2000
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <watchdog.h>
#include <asm/processor.h>
#include <ioports.h>
#include <sata.h>
#include <asm/io.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/fsl_law.h>
#include <asm/fsl_serdes.h>
#include "mp.h"
#ifdef CONFIG_SYS_QE_FW_IN_NAND
#include <nand.h>
#include <errno.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

extern void srio_init(void);

#ifdef CONFIG_QE
extern qe_iop_conf_t qe_iop_conf_tab[];
extern void qe_config_iopin(u8 port, u8 pin, int dir,
			    int open_drain, int assign);
extern void qe_init(uint qe_base);
extern void qe_reset(void);

static void config_qe_ioports(void)
{
	u8 port, pin;
	int dir, open_drain, assign;
	int i;

	for (i = 0; qe_iop_conf_tab[i].assign != QE_IOP_TAB_END; i++) {
		port = qe_iop_conf_tab[i].port;
		pin = qe_iop_conf_tab[i].pin;
		dir = qe_iop_conf_tab[i].dir;
		open_drain = qe_iop_conf_tab[i].open_drain;
		assign = qe_iop_conf_tab[i].assign;
		qe_config_iopin(port, pin, dir, open_drain, assign);
	}
}
#endif

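/* Program the CPM2 parallel I/O port registers from iop_conf_tab[] */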
#ifdef CONFIG_CPM2
void config_8560_ioports (volatile ccsr_cpm_t * cpm)
{
	int portnum;

	for (portnum = 0; portnum < 4; portnum++) {
		uint pmsk = 0,
		     ppar = 0,
		     psor = 0,
		     pdir = 0,
		     podr = 0,
		     pdat = 0;
		iop_conf_t *iopc = (iop_conf_t *) &iop_conf_tab[portnum][0];
		iop_conf_t *eiopc = iopc + 32;
		uint msk = 1;

		/*
		 * NOTE:
		 * index 0 refers to pin 31,
		 * index 31 refers to pin 0
		 */
		while (iopc < eiopc) {
			if (iopc->conf) {
				pmsk |= msk;
				if (iopc->ppar)
					ppar |= msk;
				if (iopc->psor)
					psor |= msk;
				if (iopc->pdir)
					pdir |= msk;
				if (iopc->podr)
					podr |= msk;
				if (iopc->pdat)
					pdat |= msk;
			}

			msk <<= 1;
			iopc++;
		}

		if (pmsk != 0) {
			volatile ioport_t *iop = ioport_addr (cpm, portnum);
			uint tpmsk = ~pmsk;

			/*
			 * The (somewhat confusing) paragraph at the
			 * bottom of page 35-5 warns that there might
			 * be "unknown behaviour" when programming
			 * PSORx and PDIRx if PPARx = 1, so disable
			 * the dedicated function first and enable it
			 * last.
			 */
			iop->ppar &= tpmsk;
			iop->psor = (iop->psor & tpmsk) | psor;
			iop->podr = (iop->podr & tpmsk) | podr;
			iop->pdat = (iop->pdat & tpmsk) | pdat;
			iop->pdir = (iop->pdir & tpmsk) | pdir;
			iop->ppar |= ppar;
		}
	}
}
#endif
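/*
 * CoreNet Platform Cache (CPC) helpers: enable_cpc() turns on each CPC
 * controller and reports the total size; invalidate_cpc() flash-invalidates
 * the CPCs (skipping any used entirely as SRAM) before DDR is enabled.
 */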
#ifdef CONFIG_SYS_FSL_CPC
static void enable_cpc(void)
{
	int i;
	u32 size = 0;

	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		u32 cpccfg0 = in_be32(&cpc->cpccfg0);
		size += CPC_CFG0_SZ_K(cpccfg0);
#ifdef CONFIG_RAMBOOT_PBL
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN) {
			/* find and disable LAW of SRAM */
			struct law_entry law = find_law(CONFIG_SYS_INIT_L3_ADDR);

			if (law.index == -1) {
				printf("\nFatal error: no LAW found for CPC SRAM\n");
				return;
			}
			disable_law(law.index);

			clrbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_CDQ_SPEC_DIS);
			out_be32(&cpc->cpccsr0, 0);
			out_be32(&cpc->cpcsrcr0, 0);
		}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A002
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_TAG_ECC_SCRUB_DIS);
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A003
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_DATA_ECC_SCRUB_DIS);
#endif

		out_be32(&cpc->cpccsr0, CPC_CSR0_CE | CPC_CSR0_PE);
		/* Read back to sync write */
		in_be32(&cpc->cpccsr0);
	}

	printf("Corenet Platform Cache: %d KB enabled\n", size);
}

void invalidate_cpc(void)
{
	int i;
	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		/* skip the CPC when it is used entirely as SRAM */
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN)
			continue;

		/* Flash invalidate the CPC and clear all the locks */
		out_be32(&cpc->cpccsr0, CPC_CSR0_FI | CPC_CSR0_LFC);
		while (in_be32(&cpc->cpccsr0) & (CPC_CSR0_FI | CPC_CSR0_LFC))
			;
	}
}
#else
#define enable_cpc()
#define invalidate_cpc()
#endif /* CONFIG_SYS_FSL_CPC */

/*
 * Breathe some life into the CPU...
 *
 * Set up the memory map,
 * initialize a bunch of registers
 */

#ifdef CONFIG_FSL_CORENET
static void corenet_tb_init(void)
{
	volatile ccsr_rcpm_t *rcpm =
		(void *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);
	volatile ccsr_pic_t *pic =
		(void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
	u32 whoami = in_be32(&pic->whoami);

	/* Enable the timebase register for this core */
	out_be32(&rcpm->ctbenrl, (1 << whoami));
}
#endif

void cpu_init_f (void)
{
	extern void m8560_cpm_reset (void);
#ifdef CONFIG_MPC8548
	ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
	uint svr = get_svr();

	/*
	 * CPU2 errata workaround: a core hang is possible while executing
	 * a msync instruction when a snoopable transaction from an I/O
	 * master tagged to make quick forward progress is present.
	 * Fixed in silicon rev 2.1.
	 */
	if ((SVR_MAJ(svr) == 1) || ((SVR_MAJ(svr) == 2 && SVR_MIN(svr) == 0x0)))
		out_be32(&ecm->eebpcr, in_be32(&ecm->eebpcr) | (1 << 16));
#endif

	disable_tlb(14);
	disable_tlb(15);

#ifdef CONFIG_CPM2
	config_8560_ioports((ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR);
#endif

	init_early_memctl_regs();

#if defined(CONFIG_CPM2)
	m8560_cpm_reset();
#endif
#ifdef CONFIG_QE
	/* Config QE ioports */
	config_qe_ioports();
#endif
#if defined(CONFIG_FSL_DMA)
	dma_init();
#endif
#ifdef CONFIG_FSL_CORENET
	corenet_tb_init();
#endif
	init_used_tlb_cams();

	/* Invalidate the CPC before DDR gets enabled */
	invalidate_cpc();
}

/* Implement a dummy function for those platforms w/o SERDES */
static void __fsl_serdes__init(void)
{
	return;
}
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);

/*
 * Initialize L2 as cache.
 *
 * The newer 8548, etc, parts have twice as much cache, but
 * use the same bit-encoding as the older 8555, etc, parts.
 *
 */
int cpu_init_r(void)
{
#ifdef CONFIG_SYS_LBC_LCRR
	volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
#endif

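	/*
	 * Work around P4080 erratum CPU22: flush the d-cache and set
	 * L1CSR2[DCWS] (data cache write shadow) before setting up the L2.
	 */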
#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22)
	flush_dcache();
	mtspr(L1CSR2, (mfspr(L1CSR2) | L1CSR2_DCWS));
	sync();
#endif

	puts ("L2: ");

#if defined(CONFIG_L2_CACHE)
	volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
	volatile uint cache_ctl;
	uint svr, ver;
	uint l2srbar;
	u32 l2siz_field;

	svr = get_svr();
	ver = SVR_SOC_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

#if defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SYS_INIT_L2_ADDR)
	if (cache_ctl & MPC85xx_L2CTL_L2E) {
		/* Clear L2 SRAM memory-mapped base address */
		out_be32(&l2cache->l2srbar0, 0x0);
		out_be32(&l2cache->l2srbar1, 0x0);

		/* set MBECCDIS=0, SBECCDIS=0 */
		clrbits_be32(&l2cache->l2errdis,
			(MPC85xx_L2ERRDIS_MBECC |
			 MPC85xx_L2ERRDIS_SBECC));

		/* set L2E=0, L2SRAM=0 */
		clrbits_be32(&l2cache->l2ctl,
			(MPC85xx_L2CTL_L2E |
			 MPC85xx_L2CTL_L2SRAM_ENTIRE));
	}
#endif

	l2siz_field = (cache_ctl >> 28) & 0x3;

	switch (l2siz_field) {
	case 0x0:
		printf(" unknown size (0x%08x)\n", cache_ctl);
		return -1;
		break;
	case 0x1:
		if (ver == SVR_8540 || ver == SVR_8560 ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("128 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=1 (128 Kbyte) */
			cache_ctl = 0xc4000000;
		} else {
			puts("256 KB ");
			cache_ctl = 0xc0000000; /* set L2E=1, L2I=1, & L2SRAM=0 */
		}
		break;
	case 0x2:
		if (ver == SVR_8540 || ver == SVR_8560 ||
		    ver == SVR_8541 || ver == SVR_8541_E ||
		    ver == SVR_8555 || ver == SVR_8555_E) {
			puts("256 KB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 Kbyte) */
			cache_ctl = 0xc8000000;
		} else {
			puts ("512 KB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x3:
		puts("1024 KB ");
		/* set L2E=1, L2I=1, & L2SRAM=0 */
		cache_ctl = 0xc0000000;
		break;
	}

	if (l2cache->l2ctl & MPC85xx_L2CTL_L2E) {
		puts("already enabled");
		l2srbar = l2cache->l2srbar0;
#if defined(CONFIG_SYS_INIT_L2_ADDR) && defined(CONFIG_SYS_FLASH_BASE)
		if (l2cache->l2ctl & MPC85xx_L2CTL_L2SRAM_ENTIRE
		    && l2srbar >= CONFIG_SYS_FLASH_BASE) {
			l2srbar = CONFIG_SYS_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf("moving to 0x%08x", CONFIG_SYS_INIT_L2_ADDR);
		}
#endif /* CONFIG_SYS_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		puts("enabled\n");
	}
#elif defined(CONFIG_BACKSIDE_L2_CACHE)
	u32 l2cfg0 = mfspr(SPRN_L2CFG0);

	/* invalidate the L2 cache */
	mtspr(SPRN_L2CSR0, (L2CSR0_L2FI|L2CSR0_L2LFC));
	while (mfspr(SPRN_L2CSR0) & (L2CSR0_L2FI|L2CSR0_L2LFC))
		;

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	mtspr(SPRN_L2CSR1, (32 + 1));
#endif

	/* enable the cache */
	mtspr(SPRN_L2CSR0, CONFIG_SYS_INIT_L2CSR0);

	if (CONFIG_SYS_INIT_L2CSR0 & L2CSR0_L2E) {
		while (!(mfspr(SPRN_L2CSR0) & L2CSR0_L2E))
			;
		printf("%d KB enabled\n", (l2cfg0 & 0x3fff) * 64);
	}
#else
	puts("disabled\n");
#endif

	enable_cpc();

	/* needs to be in ram since code uses global static vars */
	fsl_serdes_init();

#ifdef CONFIG_SYS_SRIO
	srio_init();
#endif

#if defined(CONFIG_MP)
	setup_mp();
#endif

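	/*
	 * Work around erratum eSDHC136 by setting bit 14 (MSB-0 numbering)
	 * of the DCSR register at offset 0x20520.
	 */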
#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC136
	{
		void *p;
		p = (void *)CONFIG_SYS_DCSRBAR + 0x20520;
		setbits_be32(p, 1 << (31 - 14));
	}
#endif

#ifdef CONFIG_SYS_LBC_LCRR
	/*
	 * Modify the CLKDIV field of the LCRR register to improve the
	 * write speed for NOR flash.
	 */
	clrsetbits_be32(&lbc->lcrr, LCRR_CLKDIV, CONFIG_SYS_LBC_LCRR);
	__raw_readl(&lbc->lcrr);
	isync();
#endif

	return 0;
}

extern void setup_ivors(void);

void arch_preboot_os(void)
{
	u32 msr;

	/*
	 * We are changing interrupt offsets and are about to boot the OS so
	 * we need to make sure we disable all async interrupts. EE is already
	 * disabled by the time we get called.
	 */
	msr = mfmsr();
	msr &= ~(MSR_ME|MSR_CE|MSR_DE);
	mtmsr(msr);

	setup_ivors();
}

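/*
 * Only initialize SATA when one of the SATA SERDES lanes is actually
 * configured; otherwise skip the generic __sata_initialize().
 */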
#if defined(CONFIG_CMD_SATA) && defined(CONFIG_FSL_SATA)
int sata_initialize(void)
{
	if (is_serdes_configured(SATA1) || is_serdes_configured(SATA2))
		return __sata_initialize();

	return 1;
}
#endif
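/*
 * cpu_secondary_init_r(): when the QE is enabled, optionally load its
 * firmware from NAND into DDR, then initialize and reset the QE.
 */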
void cpu_secondary_init_r(void)
{
#ifdef CONFIG_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00080000; /* QE immr base */
#ifdef CONFIG_SYS_QE_FW_IN_NAND
	int ret;
	size_t fw_length = CONFIG_SYS_QE_FW_LENGTH;

	/* load QE firmware from NAND flash to DDR first */
	ret = nand_read(&nand_info[0], (loff_t)CONFIG_SYS_QE_FW_IN_NAND,
			&fw_length, (u_char *)CONFIG_SYS_QE_FW_ADDR);

	if (ret && ret == -EUCLEAN) {
		printf("NAND read for QE firmware at offset %x failed %d\n",
		       CONFIG_SYS_QE_FW_IN_NAND, ret);
	}
#endif
	qe_init(qe_base);
	qe_reset();
#endif
}