drivers/ddr/fsl/options.c
driver/ddr: Add 256 byte interleaving support
1 /*
2 * Copyright 2008, 2010-2012 Freescale Semiconductor, Inc.
3 *
4 * SPDX-License-Identifier: GPL-2.0+
5 */
6
7 #include <common.h>
8 #include <hwconfig.h>
9 #include <fsl_ddr_sdram.h>
10
11 #include <fsl_ddr.h>
12
13 /*
14  * Use our own stack-based buffer before relocation to allow accessing longer
15  * hwconfig strings that might be in the environment before we've relocated.
16  * This is pretty fragile, both in its use of the stack and in whether the
17  * buffer is big enough. However, we will get a warning from getenv_f for the latter.
18 */
19
20 /* Board-specific functions defined in each board's ddr.c */
21 extern void fsl_ddr_board_options(memctl_options_t *popts,
22 dimm_params_t *pdimm,
23 unsigned int ctrl_num);
24
25 struct dynamic_odt {
26 unsigned int odt_rd_cfg;
27 unsigned int odt_wr_cfg;
28 unsigned int odt_rtt_norm;
29 unsigned int odt_rtt_wr;
30 };
31
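/*
 * Note on naming (added for clarity; derived from how these tables are
 * selected in populate_memctl_options() below): "single_"/"dual_" gives the
 * number of DIMM slots per controller, and each following letter describes
 * one slot: Q = quad-rank, D = dual-rank, S = single-rank, 0 = empty slot.
 */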
32 #ifdef CONFIG_SYS_FSL_DDR3
33 static const struct dynamic_odt single_Q[4] = {
34 { /* cs0 */
35 FSL_DDR_ODT_NEVER,
36 FSL_DDR_ODT_CS_AND_OTHER_DIMM,
37 DDR3_RTT_20_OHM,
38 DDR3_RTT_120_OHM
39 },
40 { /* cs1 */
41 FSL_DDR_ODT_NEVER,
42 FSL_DDR_ODT_NEVER, /* tied high */
43 DDR3_RTT_OFF,
44 DDR3_RTT_120_OHM
45 },
46 { /* cs2 */
47 FSL_DDR_ODT_NEVER,
48 FSL_DDR_ODT_CS_AND_OTHER_DIMM,
49 DDR3_RTT_20_OHM,
50 DDR3_RTT_120_OHM
51 },
52 { /* cs3 */
53 FSL_DDR_ODT_NEVER,
54 FSL_DDR_ODT_NEVER, /* tied high */
55 DDR3_RTT_OFF,
56 DDR3_RTT_120_OHM
57 }
58 };
59
60 static const struct dynamic_odt single_D[4] = {
61 { /* cs0 */
62 FSL_DDR_ODT_NEVER,
63 FSL_DDR_ODT_ALL,
64 DDR3_RTT_40_OHM,
65 DDR3_RTT_OFF
66 },
67 { /* cs1 */
68 FSL_DDR_ODT_NEVER,
69 FSL_DDR_ODT_NEVER,
70 DDR3_RTT_OFF,
71 DDR3_RTT_OFF
72 },
73 {0, 0, 0, 0},
74 {0, 0, 0, 0}
75 };
76
77 static const struct dynamic_odt single_S[4] = {
78 { /* cs0 */
79 FSL_DDR_ODT_NEVER,
80 FSL_DDR_ODT_ALL,
81 DDR3_RTT_40_OHM,
82 DDR3_RTT_OFF
83 },
84 {0, 0, 0, 0},
85 {0, 0, 0, 0},
86 {0, 0, 0, 0},
87 };
88
89 static const struct dynamic_odt dual_DD[4] = {
90 { /* cs0 */
91 FSL_DDR_ODT_NEVER,
92 FSL_DDR_ODT_SAME_DIMM,
93 DDR3_RTT_120_OHM,
94 DDR3_RTT_OFF
95 },
96 { /* cs1 */
97 FSL_DDR_ODT_OTHER_DIMM,
98 FSL_DDR_ODT_OTHER_DIMM,
99 DDR3_RTT_30_OHM,
100 DDR3_RTT_OFF
101 },
102 { /* cs2 */
103 FSL_DDR_ODT_NEVER,
104 FSL_DDR_ODT_SAME_DIMM,
105 DDR3_RTT_120_OHM,
106 DDR3_RTT_OFF
107 },
108 { /* cs3 */
109 FSL_DDR_ODT_OTHER_DIMM,
110 FSL_DDR_ODT_OTHER_DIMM,
111 DDR3_RTT_30_OHM,
112 DDR3_RTT_OFF
113 }
114 };
115
116 static const struct dynamic_odt dual_DS[4] = {
117 { /* cs0 */
118 FSL_DDR_ODT_NEVER,
119 FSL_DDR_ODT_SAME_DIMM,
120 DDR3_RTT_120_OHM,
121 DDR3_RTT_OFF
122 },
123 { /* cs1 */
124 FSL_DDR_ODT_OTHER_DIMM,
125 FSL_DDR_ODT_OTHER_DIMM,
126 DDR3_RTT_30_OHM,
127 DDR3_RTT_OFF
128 },
129 { /* cs2 */
130 FSL_DDR_ODT_OTHER_DIMM,
131 FSL_DDR_ODT_ALL,
132 DDR3_RTT_20_OHM,
133 DDR3_RTT_120_OHM
134 },
135 {0, 0, 0, 0}
136 };
137 static const struct dynamic_odt dual_SD[4] = {
138 { /* cs0 */
139 FSL_DDR_ODT_OTHER_DIMM,
140 FSL_DDR_ODT_ALL,
141 DDR3_RTT_20_OHM,
142 DDR3_RTT_120_OHM
143 },
144 {0, 0, 0, 0},
145 { /* cs2 */
146 FSL_DDR_ODT_NEVER,
147 FSL_DDR_ODT_SAME_DIMM,
148 DDR3_RTT_120_OHM,
149 DDR3_RTT_OFF
150 },
151 { /* cs3 */
152 FSL_DDR_ODT_OTHER_DIMM,
153 FSL_DDR_ODT_OTHER_DIMM,
154 DDR3_RTT_20_OHM,
155 DDR3_RTT_OFF
156 }
157 };
158
159 static const struct dynamic_odt dual_SS[4] = {
160 { /* cs0 */
161 FSL_DDR_ODT_OTHER_DIMM,
162 FSL_DDR_ODT_ALL,
163 DDR3_RTT_30_OHM,
164 DDR3_RTT_120_OHM
165 },
166 {0, 0, 0, 0},
167 { /* cs2 */
168 FSL_DDR_ODT_OTHER_DIMM,
169 FSL_DDR_ODT_ALL,
170 DDR3_RTT_30_OHM,
171 DDR3_RTT_120_OHM
172 },
173 {0, 0, 0, 0}
174 };
175
176 static const struct dynamic_odt dual_D0[4] = {
177 { /* cs0 */
178 FSL_DDR_ODT_NEVER,
179 FSL_DDR_ODT_SAME_DIMM,
180 DDR3_RTT_40_OHM,
181 DDR3_RTT_OFF
182 },
183 { /* cs1 */
184 FSL_DDR_ODT_NEVER,
185 FSL_DDR_ODT_NEVER,
186 DDR3_RTT_OFF,
187 DDR3_RTT_OFF
188 },
189 {0, 0, 0, 0},
190 {0, 0, 0, 0}
191 };
192
193 static const struct dynamic_odt dual_0D[4] = {
194 {0, 0, 0, 0},
195 {0, 0, 0, 0},
196 { /* cs2 */
197 FSL_DDR_ODT_NEVER,
198 FSL_DDR_ODT_SAME_DIMM,
199 DDR3_RTT_40_OHM,
200 DDR3_RTT_OFF
201 },
202 { /* cs3 */
203 FSL_DDR_ODT_NEVER,
204 FSL_DDR_ODT_NEVER,
205 DDR3_RTT_OFF,
206 DDR3_RTT_OFF
207 }
208 };
209
210 static const struct dynamic_odt dual_S0[4] = {
211 { /* cs0 */
212 FSL_DDR_ODT_NEVER,
213 FSL_DDR_ODT_CS,
214 DDR3_RTT_40_OHM,
215 DDR3_RTT_OFF
216 },
217 {0, 0, 0, 0},
218 {0, 0, 0, 0},
219 {0, 0, 0, 0}
220
221 };
222
223 static const struct dynamic_odt dual_0S[4] = {
224 {0, 0, 0, 0},
225 {0, 0, 0, 0},
226 { /* cs2 */
227 FSL_DDR_ODT_NEVER,
228 FSL_DDR_ODT_CS,
229 DDR3_RTT_40_OHM,
230 DDR3_RTT_OFF
231 },
232 {0, 0, 0, 0}
233
234 };
235
236 static const struct dynamic_odt odt_unknown[4] = {
237 { /* cs0 */
238 FSL_DDR_ODT_NEVER,
239 FSL_DDR_ODT_CS,
240 DDR3_RTT_120_OHM,
241 DDR3_RTT_OFF
242 },
243 { /* cs1 */
244 FSL_DDR_ODT_NEVER,
245 FSL_DDR_ODT_CS,
246 DDR3_RTT_120_OHM,
247 DDR3_RTT_OFF
248 },
249 { /* cs2 */
250 FSL_DDR_ODT_NEVER,
251 FSL_DDR_ODT_CS,
252 DDR3_RTT_120_OHM,
253 DDR3_RTT_OFF
254 },
255 { /* cs3 */
256 FSL_DDR_ODT_NEVER,
257 FSL_DDR_ODT_CS,
258 DDR3_RTT_120_OHM,
259 DDR3_RTT_OFF
260 }
261 };
262 #else /* CONFIG_SYS_FSL_DDR3 */
263 static const struct dynamic_odt single_Q[4] = {
264 {0, 0, 0, 0},
265 {0, 0, 0, 0},
266 {0, 0, 0, 0},
267 {0, 0, 0, 0}
268 };
269
270 static const struct dynamic_odt single_D[4] = {
271 { /* cs0 */
272 FSL_DDR_ODT_NEVER,
273 FSL_DDR_ODT_ALL,
274 DDR2_RTT_150_OHM,
275 DDR2_RTT_OFF
276 },
277 { /* cs1 */
278 FSL_DDR_ODT_NEVER,
279 FSL_DDR_ODT_NEVER,
280 DDR2_RTT_OFF,
281 DDR2_RTT_OFF
282 },
283 {0, 0, 0, 0},
284 {0, 0, 0, 0}
285 };
286
287 static const struct dynamic_odt single_S[4] = {
288 { /* cs0 */
289 FSL_DDR_ODT_NEVER,
290 FSL_DDR_ODT_ALL,
291 DDR2_RTT_150_OHM,
292 DDR2_RTT_OFF
293 },
294 {0, 0, 0, 0},
295 {0, 0, 0, 0},
296 {0, 0, 0, 0},
297 };
298
299 static const struct dynamic_odt dual_DD[4] = {
300 { /* cs0 */
301 FSL_DDR_ODT_OTHER_DIMM,
302 FSL_DDR_ODT_OTHER_DIMM,
303 DDR2_RTT_75_OHM,
304 DDR2_RTT_OFF
305 },
306 { /* cs1 */
307 FSL_DDR_ODT_NEVER,
308 FSL_DDR_ODT_NEVER,
309 DDR2_RTT_OFF,
310 DDR2_RTT_OFF
311 },
312 { /* cs2 */
313 FSL_DDR_ODT_OTHER_DIMM,
314 FSL_DDR_ODT_OTHER_DIMM,
315 DDR2_RTT_75_OHM,
316 DDR2_RTT_OFF
317 },
318 { /* cs3 */
319 FSL_DDR_ODT_NEVER,
320 FSL_DDR_ODT_NEVER,
321 DDR2_RTT_OFF,
322 DDR2_RTT_OFF
323 }
324 };
325
326 static const struct dynamic_odt dual_DS[4] = {
327 { /* cs0 */
328 FSL_DDR_ODT_OTHER_DIMM,
329 FSL_DDR_ODT_OTHER_DIMM,
330 DDR2_RTT_75_OHM,
331 DDR2_RTT_OFF
332 },
333 { /* cs1 */
334 FSL_DDR_ODT_NEVER,
335 FSL_DDR_ODT_NEVER,
336 DDR2_RTT_OFF,
337 DDR2_RTT_OFF
338 },
339 { /* cs2 */
340 FSL_DDR_ODT_OTHER_DIMM,
341 FSL_DDR_ODT_OTHER_DIMM,
342 DDR2_RTT_75_OHM,
343 DDR2_RTT_OFF
344 },
345 {0, 0, 0, 0}
346 };
347
348 static const struct dynamic_odt dual_SD[4] = {
349 { /* cs0 */
350 FSL_DDR_ODT_OTHER_DIMM,
351 FSL_DDR_ODT_OTHER_DIMM,
352 DDR2_RTT_75_OHM,
353 DDR2_RTT_OFF
354 },
355 {0, 0, 0, 0},
356 { /* cs2 */
357 FSL_DDR_ODT_OTHER_DIMM,
358 FSL_DDR_ODT_OTHER_DIMM,
359 DDR2_RTT_75_OHM,
360 DDR2_RTT_OFF
361 },
362 { /* cs3 */
363 FSL_DDR_ODT_NEVER,
364 FSL_DDR_ODT_NEVER,
365 DDR2_RTT_OFF,
366 DDR2_RTT_OFF
367 }
368 };
369
370 static const struct dynamic_odt dual_SS[4] = {
371 { /* cs0 */
372 FSL_DDR_ODT_OTHER_DIMM,
373 FSL_DDR_ODT_OTHER_DIMM,
374 DDR2_RTT_75_OHM,
375 DDR2_RTT_OFF
376 },
377 {0, 0, 0, 0},
378 { /* cs2 */
379 FSL_DDR_ODT_OTHER_DIMM,
380 FSL_DDR_ODT_OTHER_DIMM,
381 DDR2_RTT_75_OHM,
382 DDR2_RTT_OFF
383 },
384 {0, 0, 0, 0}
385 };
386
387 static const struct dynamic_odt dual_D0[4] = {
388 { /* cs0 */
389 FSL_DDR_ODT_NEVER,
390 FSL_DDR_ODT_ALL,
391 DDR2_RTT_150_OHM,
392 DDR2_RTT_OFF
393 },
394 { /* cs1 */
395 FSL_DDR_ODT_NEVER,
396 FSL_DDR_ODT_NEVER,
397 DDR2_RTT_OFF,
398 DDR2_RTT_OFF
399 },
400 {0, 0, 0, 0},
401 {0, 0, 0, 0}
402 };
403
404 static const struct dynamic_odt dual_0D[4] = {
405 {0, 0, 0, 0},
406 {0, 0, 0, 0},
407 { /* cs2 */
408 FSL_DDR_ODT_NEVER,
409 FSL_DDR_ODT_ALL,
410 DDR2_RTT_150_OHM,
411 DDR2_RTT_OFF
412 },
413 { /* cs3 */
414 FSL_DDR_ODT_NEVER,
415 FSL_DDR_ODT_NEVER,
416 DDR2_RTT_OFF,
417 DDR2_RTT_OFF
418 }
419 };
420
421 static const struct dynamic_odt dual_S0[4] = {
422 { /* cs0 */
423 FSL_DDR_ODT_NEVER,
424 FSL_DDR_ODT_CS,
425 DDR2_RTT_150_OHM,
426 DDR2_RTT_OFF
427 },
428 {0, 0, 0, 0},
429 {0, 0, 0, 0},
430 {0, 0, 0, 0}
431
432 };
433
434 static const struct dynamic_odt dual_0S[4] = {
435 {0, 0, 0, 0},
436 {0, 0, 0, 0},
437 { /* cs2 */
438 FSL_DDR_ODT_NEVER,
439 FSL_DDR_ODT_CS,
440 DDR2_RTT_150_OHM,
441 DDR2_RTT_OFF
442 },
443 {0, 0, 0, 0}
444
445 };
446
447 static const struct dynamic_odt odt_unknown[4] = {
448 { /* cs0 */
449 FSL_DDR_ODT_NEVER,
450 FSL_DDR_ODT_CS,
451 DDR2_RTT_75_OHM,
452 DDR2_RTT_OFF
453 },
454 { /* cs1 */
455 FSL_DDR_ODT_NEVER,
456 FSL_DDR_ODT_NEVER,
457 DDR2_RTT_OFF,
458 DDR2_RTT_OFF
459 },
460 { /* cs2 */
461 FSL_DDR_ODT_NEVER,
462 FSL_DDR_ODT_CS,
463 DDR2_RTT_75_OHM,
464 DDR2_RTT_OFF
465 },
466 { /* cs3 */
467 FSL_DDR_ODT_NEVER,
468 FSL_DDR_ODT_NEVER,
469 DDR2_RTT_OFF,
470 DDR2_RTT_OFF
471 }
472 };
473 #endif
474
475 /*
476  * Automatically select bank interleaving mode based on DIMMs
477  * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
478  * This function only deals with one or two slots per controller.
479 */
480 static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
481 {
482 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
483 if (pdimm[0].n_ranks == 4)
484 return FSL_DDR_CS0_CS1_CS2_CS3;
485 else if (pdimm[0].n_ranks == 2)
486 return FSL_DDR_CS0_CS1;
487 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
488 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
489 if (pdimm[0].n_ranks == 4)
490 return FSL_DDR_CS0_CS1_CS2_CS3;
491 #endif
492 if (pdimm[0].n_ranks == 2) {
493 if (pdimm[1].n_ranks == 2)
494 return FSL_DDR_CS0_CS1_CS2_CS3;
495 else
496 return FSL_DDR_CS0_CS1;
497 }
498 #endif
499 return 0;
500 }
501
502 unsigned int populate_memctl_options(int all_dimms_registered,
503 memctl_options_t *popts,
504 dimm_params_t *pdimm,
505 unsigned int ctrl_num)
506 {
507 unsigned int i;
508 char buffer[HWCONFIG_BUFFER_SIZE];
509 char *buf = NULL;
510 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR2)
511 const struct dynamic_odt *pdodt = odt_unknown;
512 #endif
513 ulong ddr_freq;
514
515 /*
516  * Extract hwconfig from the environment since we have not properly set up
517  * the environment yet, but need it for the ddr config params
518 */
519 if (getenv_f("hwconfig", buffer, sizeof(buffer)) > 0)
520 buf = buffer;
521
522 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR2)
523 /* Chip select options. */
524 if (CONFIG_DIMM_SLOTS_PER_CTLR == 1) {
525 switch (pdimm[0].n_ranks) {
526 case 1:
527 pdodt = single_S;
528 break;
529 case 2:
530 pdodt = single_D;
531 break;
532 case 4:
533 pdodt = single_Q;
534 break;
535 }
536 } else if (CONFIG_DIMM_SLOTS_PER_CTLR == 2) {
537 switch (pdimm[0].n_ranks) {
538 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
539 case 4:
540 pdodt = single_Q;
541 if (pdimm[1].n_ranks)
542 printf("Error: Quad- and Dual-rank DIMMs "
543 "cannot be used together\n");
544 break;
545 #endif
546 case 2:
547 switch (pdimm[1].n_ranks) {
548 case 2:
549 pdodt = dual_DD;
550 break;
551 case 1:
552 pdodt = dual_DS;
553 break;
554 case 0:
555 pdodt = dual_D0;
556 break;
557 }
558 break;
559 case 1:
560 switch (pdimm[1].n_ranks) {
561 case 2:
562 pdodt = dual_SD;
563 break;
564 case 1:
565 pdodt = dual_SS;
566 break;
567 case 0:
568 pdodt = dual_S0;
569 break;
570 }
571 break;
572 case 0:
573 switch (pdimm[1].n_ranks) {
574 case 2:
575 pdodt = dual_0D;
576 break;
577 case 1:
578 pdodt = dual_0S;
579 break;
580 }
581 break;
582 }
583 }
584 #endif
585
586 /* Pick chip-select local options. */
587 for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
588 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR2)
589 popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
590 popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
591 popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
592 popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
593 #else
594 popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
595 popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
596 #endif
597 popts->cs_local_opts[i].auto_precharge = 0;
598 }
599
600 /* Pick interleaving mode. */
601
602 /*
603 * 0 = no interleaving
604 * 1 = interleaving between 2 controllers
605 */
606 popts->memctl_interleaving = 0;
607
608 /*
609 * 0 = cacheline
610 * 1 = page
611 * 2 = (logical) bank
612 * 3 = superbank (only if CS interleaving is enabled)
613 */
614 popts->memctl_interleaving_mode = 0;
615
616 /*
617 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
618 * 1: page: bit to the left of the column bits selects the memctl
619 * 2: bank: bit to the left of the bank bits selects the memctl
620 * 3: superbank: bit to the left of the chip select selects the memctl
621 *
622 * NOTE: ba_intlv (rank interleaving) is independent of memory
623 * controller interleaving; it is only within a memory controller.
624 * Must use superbank interleaving if rank interleaving is used and
625 * memory controller interleaving is enabled.
626 */
627
628 /*
629 * 0 = no
630 * 0x40 = CS0,CS1
631 * 0x20 = CS2,CS3
632 * 0x60 = CS0,CS1 + CS2,CS3
633 * 0x04 = CS0,CS1,CS2,CS3
634 */
635 popts->ba_intlv_ctl = 0;
636
637 /* Memory Organization Parameters */
638 popts->registered_dimm_en = all_dimms_registered;
639
640 	/* Operational Mode Parameters */
641
642 /* Pick ECC modes */
643 popts->ecc_mode = 0; /* 0 = disabled, 1 = enabled */
644 #ifdef CONFIG_DDR_ECC
645 if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
646 if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
647 popts->ecc_mode = 1;
648 } else
649 popts->ecc_mode = 1;
650 #endif
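	/*
	 * Illustrative usage (assumption, not part of the original file): with
	 * CONFIG_DDR_ECC enabled, setting hwconfig=fsl_ddr:ecc=off in the
	 * environment leaves ecc_mode at 0, because the "ecc" sub-option is
	 * present but not "on"; omitting the sub-option keeps ECC enabled by
	 * default via the else branch above.
	 */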
651 popts->ecc_init_using_memctl = 1; /* 0 = use DMA, 1 = use memctl */
652
653 /*
654 * Choose DQS config
655 * 0 for DDR1
656 * 1 for DDR2
657 */
658 #if defined(CONFIG_SYS_FSL_DDR1)
659 popts->dqs_config = 0;
660 #elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
661 popts->dqs_config = 1;
662 #endif
663
664 /* Choose self-refresh during sleep. */
665 popts->self_refresh_in_sleep = 1;
666
667 /* Choose dynamic power management mode. */
668 popts->dynamic_power = 0;
669
670 /*
671 * check first dimm for primary sdram width
672 * presuming all dimms are similar
673 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
674 */
675 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
676 if (pdimm[0].n_ranks != 0) {
677 if ((pdimm[0].data_width >= 64) && \
678 (pdimm[0].data_width <= 72))
679 popts->data_bus_width = 0;
680 		else if ((pdimm[0].data_width >= 32) && \
681 (pdimm[0].data_width <= 40))
682 popts->data_bus_width = 1;
683 else {
684 panic("Error: data width %u is invalid!\n",
685 pdimm[0].data_width);
686 }
687 }
688 #else
689 if (pdimm[0].n_ranks != 0) {
690 if (pdimm[0].primary_sdram_width == 64)
691 popts->data_bus_width = 0;
692 else if (pdimm[0].primary_sdram_width == 32)
693 popts->data_bus_width = 1;
694 else if (pdimm[0].primary_sdram_width == 16)
695 popts->data_bus_width = 2;
696 else {
697 panic("Error: primary sdram width %u is invalid!\n",
698 pdimm[0].primary_sdram_width);
699 }
700 }
701 #endif
702
703 popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
704
705 /* Choose burst length. */
706 #if defined(CONFIG_SYS_FSL_DDR3)
707 #if defined(CONFIG_E500MC)
708 popts->otf_burst_chop_en = 0; /* on-the-fly burst chop disable */
709 popts->burst_length = DDR_BL8; /* Fixed 8-beat burst len */
710 #else
711 if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
712 /* 32-bit or 16-bit bus */
713 popts->otf_burst_chop_en = 0;
714 popts->burst_length = DDR_BL8;
715 } else {
716 popts->otf_burst_chop_en = 1; /* on-the-fly burst chop */
717 popts->burst_length = DDR_OTF; /* on-the-fly BC4 and BL8 */
718 }
719 #endif
720 #else
721 popts->burst_length = DDR_BL4; /* has to be 4 for DDR2 */
722 #endif
723
724 /* Choose ddr controller address mirror mode */
725 #if defined(CONFIG_SYS_FSL_DDR3)
726 popts->mirrored_dimm = pdimm[0].mirrored_dimm;
727 #endif
728
729 /* Global Timing Parameters. */
730 debug("mclk_ps = %u ps\n", get_memory_clk_period_ps());
731
732 /* Pick a caslat override. */
733 popts->cas_latency_override = 0;
734 popts->cas_latency_override_value = 3;
735 if (popts->cas_latency_override) {
736 debug("using caslat override value = %u\n",
737 popts->cas_latency_override_value);
738 }
739
740 /* Decide whether to use the computed derated latency */
741 popts->use_derated_caslat = 0;
742
743 /* Choose an additive latency. */
744 popts->additive_latency_override = 0;
745 popts->additive_latency_override_value = 3;
746 if (popts->additive_latency_override) {
747 debug("using additive latency override value = %u\n",
748 popts->additive_latency_override_value);
749 }
750
751 /*
752 * 2T_EN setting
753 *
754 * Factors to consider for 2T_EN:
755 * - number of DIMMs installed
756 * - number of components, number of active ranks
757 * - how much time you want to spend playing around
758 */
759 popts->twot_en = 0;
760 popts->threet_en = 0;
761
762 /* for RDIMM, address parity enable */
763 popts->ap_en = 1;
764
765 /*
766 * BSTTOPRE precharge interval
767 *
768 * Set this to 0 for global auto precharge
769 *
770 * FIXME: Should this be configured in picoseconds?
771 * Why it should be in ps: better understanding of this
772 * relative to actual DRAM timing parameters such as tRAS.
773 * e.g. tRAS(min) = 40 ns
774 */
775 popts->bstopre = 0x100;
776
777 /* Minimum CKE pulse width -- tCKE(MIN) */
778 popts->tcke_clock_pulse_width_ps
779 = mclk_to_picos(FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR);
780
781 /*
782 * Window for four activates -- tFAW
783 *
784 	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks
785 * FIXME: varies depending upon number of column addresses or data
786 * FIXME: width, was considering looking at pdimm->primary_sdram_width
787 */
788 #if defined(CONFIG_SYS_FSL_DDR1)
789 popts->tfaw_window_four_activates_ps = mclk_to_picos(1);
790
791 #elif defined(CONFIG_SYS_FSL_DDR2)
792 /*
793 * x4/x8; some datasheets have 35000
794 * x16 wide columns only? Use 50000?
795 */
796 popts->tfaw_window_four_activates_ps = 37500;
797
798 #elif defined(CONFIG_SYS_FSL_DDR3)
799 popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
800 #endif
801 popts->zq_en = 0;
802 popts->wrlvl_en = 0;
803 #if defined(CONFIG_SYS_FSL_DDR3)
804 /*
805 	 * Because DDR3 DIMMs use a fly-by topology,
806 	 * we suggest enabling write leveling to
807 	 * meet tQDSS under different loading.
808 */
809 popts->wrlvl_en = 1;
810 popts->zq_en = 1;
811 popts->wrlvl_override = 0;
812 #endif
813
814 /*
815 	 * Check the interleaving configuration from the environment.
816 	 * Please refer to doc/README.fsl-ddr for details.
817 	 *
818 	 * If memory controller interleaving is enabled, then the data
819 	 * bus widths must be programmed identically for all memory controllers.
820 	 *
821 	 * Attempt to set all controllers to the same chip-select
822 	 * interleaving mode. A best effort is made to get the
823 	 * requested ranks interleaved together, such that the result
824 	 * is a subset of the requested configuration.
825 	 *
826 	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, mandatory 256-byte
827 	 * interleaving is enabled.
828 */
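	/*
	 * Illustrative example (assumption, not part of the original file):
	 * an environment setting such as
	 *
	 *   hwconfig=fsl_ddr:ctlr_intlv=cacheline,bank_intlv=cs0_cs1
	 *
	 * requests cache-line interleaving across memory controllers and
	 * CS0+CS1 bank (chip-select) interleaving within each controller,
	 * matching the "ctlr_intlv" and "bank_intlv" sub-options parsed below.
	 */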
829 #if (CONFIG_NUM_DDR_CONTROLLERS > 1)
830 if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
831 #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
832 ;
833 #else
834 goto done;
835 #endif
836 if (pdimm[0].n_ranks == 0) {
837 printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
838 popts->memctl_interleaving = 0;
839 goto done;
840 }
841 popts->memctl_interleaving = 1;
842 #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
843 popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
844 popts->memctl_interleaving = 1;
845 debug("256 Byte interleaving\n");
846 goto done;
847 #endif
848 /*
849 	 * Test null first. If CONFIG_HWCONFIG is not defined,
850 	 * hwconfig_subarg_cmp_f returns non-zero.
851 */
852 if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
853 "null", buf)) {
854 popts->memctl_interleaving = 0;
855 debug("memory controller interleaving disabled.\n");
856 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
857 "ctlr_intlv",
858 "cacheline", buf)) {
859 popts->memctl_interleaving_mode =
860 ((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
861 0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
862 popts->memctl_interleaving =
863 ((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
864 0 : 1;
865 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
866 "ctlr_intlv",
867 "page", buf)) {
868 popts->memctl_interleaving_mode =
869 ((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
870 0 : FSL_DDR_PAGE_INTERLEAVING;
871 popts->memctl_interleaving =
872 ((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
873 0 : 1;
874 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
875 "ctlr_intlv",
876 "bank", buf)) {
877 popts->memctl_interleaving_mode =
878 ((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
879 0 : FSL_DDR_BANK_INTERLEAVING;
880 popts->memctl_interleaving =
881 ((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
882 0 : 1;
883 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
884 "ctlr_intlv",
885 "superbank", buf)) {
886 popts->memctl_interleaving_mode =
887 ((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
888 0 : FSL_DDR_SUPERBANK_INTERLEAVING;
889 popts->memctl_interleaving =
890 ((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
891 0 : 1;
892 #if (CONFIG_NUM_DDR_CONTROLLERS == 3)
893 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
894 "ctlr_intlv",
895 "3way_1KB", buf)) {
896 popts->memctl_interleaving_mode =
897 FSL_DDR_3WAY_1KB_INTERLEAVING;
898 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
899 "ctlr_intlv",
900 "3way_4KB", buf)) {
901 popts->memctl_interleaving_mode =
902 FSL_DDR_3WAY_4KB_INTERLEAVING;
903 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
904 "ctlr_intlv",
905 "3way_8KB", buf)) {
906 popts->memctl_interleaving_mode =
907 FSL_DDR_3WAY_8KB_INTERLEAVING;
908 #elif (CONFIG_NUM_DDR_CONTROLLERS == 4)
909 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
910 "ctlr_intlv",
911 "4way_1KB", buf)) {
912 popts->memctl_interleaving_mode =
913 FSL_DDR_4WAY_1KB_INTERLEAVING;
914 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
915 "ctlr_intlv",
916 "4way_4KB", buf)) {
917 popts->memctl_interleaving_mode =
918 FSL_DDR_4WAY_4KB_INTERLEAVING;
919 } else if (hwconfig_subarg_cmp_f("fsl_ddr",
920 "ctlr_intlv",
921 "4way_8KB", buf)) {
922 popts->memctl_interleaving_mode =
923 FSL_DDR_4WAY_8KB_INTERLEAVING;
924 #endif
925 } else {
926 popts->memctl_interleaving = 0;
927 printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
928 }
929 done:
930 #endif
931 if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
932 (CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
933 		/* Test null first. If CONFIG_HWCONFIG is not defined,
934 		 * hwconfig_subarg_cmp_f returns non-zero. */
935 if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
936 "null", buf))
937 debug("bank interleaving disabled.\n");
938 else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
939 "cs0_cs1", buf))
940 popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
941 else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
942 "cs2_cs3", buf))
943 popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
944 else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
945 "cs0_cs1_and_cs2_cs3", buf))
946 popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
947 else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
948 "cs0_cs1_cs2_cs3", buf))
949 popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
950 else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
951 "auto", buf))
952 popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
953 else
954 printf("hwconfig has unrecognized parameter for bank_intlv.\n");
955 switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
956 case FSL_DDR_CS0_CS1_CS2_CS3:
957 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
958 if (pdimm[0].n_ranks < 4) {
959 popts->ba_intlv_ctl = 0;
960 printf("Not enough bank(chip-select) for "
961 "CS0+CS1+CS2+CS3 on controller %d, "
962 "interleaving disabled!\n", ctrl_num);
963 }
964 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
965 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
966 if (pdimm[0].n_ranks == 4)
967 break;
968 #endif
969 if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
970 popts->ba_intlv_ctl = 0;
971 printf("Not enough bank(chip-select) for "
972 "CS0+CS1+CS2+CS3 on controller %d, "
973 "interleaving disabled!\n", ctrl_num);
974 }
975 if (pdimm[0].capacity != pdimm[1].capacity) {
976 popts->ba_intlv_ctl = 0;
977 printf("Not identical DIMM size for "
978 "CS0+CS1+CS2+CS3 on controller %d, "
979 "interleaving disabled!\n", ctrl_num);
980 }
981 #endif
982 break;
983 case FSL_DDR_CS0_CS1:
984 if (pdimm[0].n_ranks < 2) {
985 popts->ba_intlv_ctl = 0;
986 printf("Not enough bank(chip-select) for "
987 "CS0+CS1 on controller %d, "
988 "interleaving disabled!\n", ctrl_num);
989 }
990 break;
991 case FSL_DDR_CS2_CS3:
992 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
993 if (pdimm[0].n_ranks < 4) {
994 popts->ba_intlv_ctl = 0;
995 printf("Not enough bank(chip-select) for CS2+CS3 "
996 "on controller %d, interleaving disabled!\n", ctrl_num);
997 }
998 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
999 if (pdimm[1].n_ranks < 2) {
1000 popts->ba_intlv_ctl = 0;
1001 printf("Not enough bank(chip-select) for CS2+CS3 "
1002 "on controller %d, interleaving disabled!\n", ctrl_num);
1003 }
1004 #endif
1005 break;
1006 case FSL_DDR_CS0_CS1_AND_CS2_CS3:
1007 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1008 if (pdimm[0].n_ranks < 4) {
1009 popts->ba_intlv_ctl = 0;
1010 printf("Not enough bank(CS) for CS0+CS1 and "
1011 "CS2+CS3 on controller %d, "
1012 "interleaving disabled!\n", ctrl_num);
1013 }
1014 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1015 if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
1016 popts->ba_intlv_ctl = 0;
1017 printf("Not enough bank(CS) for CS0+CS1 and "
1018 "CS2+CS3 on controller %d, "
1019 "interleaving disabled!\n", ctrl_num);
1020 }
1021 #endif
1022 break;
1023 default:
1024 popts->ba_intlv_ctl = 0;
1025 break;
1026 }
1027 }
1028
1029 if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
1030 if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
1031 popts->addr_hash = 0;
1032 else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
1033 "true", buf))
1034 popts->addr_hash = 1;
1035 }
1036
1037 if (pdimm[0].n_ranks == 4)
1038 popts->quad_rank_present = 1;
1039
1040 ddr_freq = get_ddr_freq(0) / 1000000;
1041 if (popts->registered_dimm_en) {
1042 popts->rcw_override = 1;
1043 popts->rcw_1 = 0x000a5a00;
1044 if (ddr_freq <= 800)
1045 popts->rcw_2 = 0x00000000;
1046 else if (ddr_freq <= 1066)
1047 popts->rcw_2 = 0x00100000;
1048 else if (ddr_freq <= 1333)
1049 popts->rcw_2 = 0x00200000;
1050 else
1051 popts->rcw_2 = 0x00300000;
1052 }
1053
1054 fsl_ddr_board_options(popts, pdimm, ctrl_num);
1055
1056 return 0;
1057 }
1058
1059 void check_interleaving_options(fsl_ddr_info_t *pinfo)
1060 {
1061 int i, j, k, check_n_ranks, intlv_invalid = 0;
1062 unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
1063 unsigned long long check_rank_density;
1064 struct dimm_params_s *dimm;
1065 /*
1066 * Check if all controllers are configured for memory
1067 	 * controller interleaving. Identical DIMMs are recommended. At least
1068 	 * the size and the row and column address counts should be checked.
1069 */
1070 j = 0;
1071 check_n_ranks = pinfo->dimm_params[0][0].n_ranks;
1072 check_rank_density = pinfo->dimm_params[0][0].rank_density;
1073 check_n_row_addr = pinfo->dimm_params[0][0].n_row_addr;
1074 check_n_col_addr = pinfo->dimm_params[0][0].n_col_addr;
1075 check_intlv = pinfo->memctl_opts[0].memctl_interleaving_mode;
1076 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
1077 dimm = &pinfo->dimm_params[i][0];
1078 if (!pinfo->memctl_opts[i].memctl_interleaving) {
1079 continue;
1080 } else if (((check_rank_density != dimm->rank_density) ||
1081 (check_n_ranks != dimm->n_ranks) ||
1082 (check_n_row_addr != dimm->n_row_addr) ||
1083 (check_n_col_addr != dimm->n_col_addr) ||
1084 (check_intlv !=
1085 pinfo->memctl_opts[i].memctl_interleaving_mode))){
1086 intlv_invalid = 1;
1087 break;
1088 } else {
1089 j++;
1090 }
1091
1092 }
1093 if (intlv_invalid) {
1094 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++)
1095 pinfo->memctl_opts[i].memctl_interleaving = 0;
1096 printf("Not all DIMMs are identical. "
1097 "Memory controller interleaving disabled.\n");
1098 } else {
1099 switch (check_intlv) {
1100 case FSL_DDR_256B_INTERLEAVING:
1101 case FSL_DDR_CACHE_LINE_INTERLEAVING:
1102 case FSL_DDR_PAGE_INTERLEAVING:
1103 case FSL_DDR_BANK_INTERLEAVING:
1104 case FSL_DDR_SUPERBANK_INTERLEAVING:
1105 if (3 == CONFIG_NUM_DDR_CONTROLLERS)
1106 k = 2;
1107 else
1108 k = CONFIG_NUM_DDR_CONTROLLERS;
1109 break;
1110 case FSL_DDR_3WAY_1KB_INTERLEAVING:
1111 case FSL_DDR_3WAY_4KB_INTERLEAVING:
1112 case FSL_DDR_3WAY_8KB_INTERLEAVING:
1113 case FSL_DDR_4WAY_1KB_INTERLEAVING:
1114 case FSL_DDR_4WAY_4KB_INTERLEAVING:
1115 case FSL_DDR_4WAY_8KB_INTERLEAVING:
1116 default:
1117 k = CONFIG_NUM_DDR_CONTROLLERS;
1118 break;
1119 }
1120 debug("%d of %d controllers are interleaving.\n", j, k);
1121 if (j && (j != k)) {
1122 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++)
1123 pinfo->memctl_opts[i].memctl_interleaving = 0;
1124 printf("Not all controllers have compatible "
1125 "interleaving mode. All disabled.\n");
1126 }
1127 }
1128 debug("Checking interleaving options completed\n");
1129 }
1130
1131 int fsl_use_spd(void)
1132 {
1133 int use_spd = 0;
1134
1135 #ifdef CONFIG_DDR_SPD
1136 char buffer[HWCONFIG_BUFFER_SIZE];
1137 char *buf = NULL;
1138
1139 /*
1140 	 * Extract hwconfig from the environment since we have not properly set up
1141 	 * the environment yet, but need it for the ddr config params
1142 */
1143 if (getenv_f("hwconfig", buffer, sizeof(buffer)) > 0)
1144 buf = buffer;
1145
1146 /* if hwconfig is not enabled, or "sdram" is not defined, use spd */
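	/*
	 * Illustrative example (assumption, not part of the original file):
	 * hwconfig=fsl_ddr:sdram=fixed selects the board's fixed (compile-time)
	 * DDR configuration, while sdram=spd, an unrecognized value, or no
	 * "sdram" sub-option at all falls back to SPD-based configuration.
	 */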
1147 if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
1148 if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
1149 use_spd = 1;
1150 else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
1151 "fixed", buf))
1152 use_spd = 0;
1153 else
1154 use_spd = 1;
1155 } else
1156 use_spd = 1;
1157 #endif
1158
1159 return use_spd;
1160 }