/*
 * Copyright 2008-2014 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * Version 2 as published by the Free Software Foundation.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>

#include <fsl_ddr.h>

#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
static unsigned int
compute_cas_latency(const dimm_params_t *dimm_params,
		    common_timing_params_t *outpdimm,
		    unsigned int number_of_dimms)
{
	unsigned int i;
	unsigned int common_caslat;
	unsigned int caslat_actual;
	unsigned int retry = 16;
	unsigned int tmp;
	const unsigned int mclk_ps = get_memory_clk_period_ps();
#ifdef CONFIG_SYS_FSL_DDR3
	const unsigned int taamax = 20000;
#else
	const unsigned int taamax = 18000;
#endif

	/* Compute the common CAS latency supported by all slots */
	tmp = dimm_params[0].caslat_x;
	for (i = 1; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks)
			tmp &= dimm_params[i].caslat_x;
	}
	common_caslat = tmp;

	/* Validate that the memory clock is within the DIMMs' range */
	if (mclk_ps < outpdimm->tckmin_x_ps) {
		printf("DDR clock (MCLK cycle %u ps) is faster than "
		       "the slowest DIMM(s) (tCKmin %u ps) can support.\n",
		       mclk_ps, outpdimm->tckmin_x_ps);
	}
#ifdef CONFIG_SYS_FSL_DDR4
	if (mclk_ps > outpdimm->tckmax_ps) {
		printf("DDR clock (MCLK cycle %u ps) is slower than DIMM(s) (tCKmax %u ps) can support.\n",
		       mclk_ps, outpdimm->tckmax_ps);
	}
#endif
	/* Determine the actual CAS latency */
	caslat_actual = (outpdimm->taamin_ps + mclk_ps - 1) / mclk_ps;
	/* Check if the DIMMs support this CAS latency */
	while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
		caslat_actual++;
		retry--;
	}
	/*
	 * Once the calculation of caslat_actual is completed,
	 * we must verify that this CAS latency value does not
	 * exceed tAAmax, which is 20 ns for all DDR3 speed grades
	 * and 18 ns for all DDR4 speed grades.
	 */
	if (caslat_actual * mclk_ps > taamax) {
		printf("The chosen CAS latency %d is too large\n",
		       caslat_actual);
	}
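
	/*
	 * Illustrative example (numbers assumed, not taken from SPD): with
	 * mclk_ps = 1250 (DDR3-1600) and taamin_ps = 13750, the search above
	 * starts at (13750 + 1249) / 1250 = 11 and walks upward until the
	 * common caslat mask has that bit set; 11 * 1250 = 13750 ps is well
	 * below the 20000 ps tAAmax limit.
	 */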
	outpdimm->lowest_common_spd_caslat = caslat_actual;
	debug("lowest_common_spd_caslat is 0x%x\n", caslat_actual);

	return 0;
}
#else	/* for DDR1 and DDR2 */
static unsigned int
compute_cas_latency(const dimm_params_t *dimm_params,
		    common_timing_params_t *outpdimm,
		    unsigned int number_of_dimms)
{
	int i;
	const unsigned int mclk_ps = get_memory_clk_period_ps();
	unsigned int lowest_good_caslat;
	unsigned int not_ok;
	unsigned int temp1, temp2;

	debug("using mclk_ps = %u\n", mclk_ps);
	if (mclk_ps > outpdimm->tckmax_ps) {
		printf("Warning: DDR clock (%u ps) is slower than DIMM(s) (tCKmax %u ps)\n",
		       mclk_ps, outpdimm->tckmax_ps);
	}

	/*
	 * Compute a CAS latency suitable for all DIMMs
	 *
	 * Strategy for SPD-defined latencies: compute only the
	 * CAS latency defined by all DIMMs.
	 */

	/*
	 * Step 1: find the CAS latency common to all DIMMs using a bitwise
	 * operation.
	 */
	temp1 = 0xFF;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp2 = 0;
			temp2 |= 1 << dimm_params[i].caslat_x;
			temp2 |= 1 << dimm_params[i].caslat_x_minus_1;
			temp2 |= 1 << dimm_params[i].caslat_x_minus_2;
			/*
			 * If there was no entry for X-2 (X-1) in
			 * the SPD, then caslat_x_minus_2
			 * (caslat_x_minus_1) contains either 255 or
			 * 0xFFFFFFFF because that's what the glorious
			 * __ilog2 function returns for an input of 0.
			 * On 32-bit PowerPC, a left shift whose count has
			 * bit 26 set (which both 255 and 0xFFFFFFFF have)
			 * leaves the destination register at 0. That is
			 * why this works.
			 */
			temp1 &= temp2;
		}
	}
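
	/*
	 * Illustrative example (assumed masks): if DIMM 0 advertises CL 3/4/5
	 * (bits 3-5, 0x38) and DIMM 1 advertises CL 4/5/6 (bits 4-6, 0x70),
	 * the intersection computed above leaves temp1 = 0x30, i.e. only
	 * CL 4 and CL 5 are common to both DIMMs.
	 */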

	/*
	 * Step 2: check each common CAS latency against the tCK of each
	 * DIMM's SPD.
	 */
	lowest_good_caslat = 0;
	temp2 = 0;
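	/*
	 * Note: __ilog2(temp1) picks the highest set bit, so the loop below
	 * walks the candidate CAS latencies from highest to lowest, and
	 * lowest_good_caslat ends up holding the smallest CL that passed
	 * the checks for every DIMM.
	 */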
	while (temp1) {
		not_ok = 0;
		temp2 = __ilog2(temp1);
		debug("checking common caslat = %u\n", temp2);

		/* Check if this CAS latency will work on all DIMMs at tCK. */
		for (i = 0; i < number_of_dimms; i++) {
			if (!dimm_params[i].n_ranks)
				continue;

			if (dimm_params[i].caslat_x == temp2) {
				if (mclk_ps >= dimm_params[i].tckmin_x_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tCKmin_X_ps of %u\n",
					      temp2, i, mclk_ps,
					      dimm_params[i].tckmin_x_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_x_minus_1 == temp2) {
				unsigned int tckmin_x_minus_1_ps
					= dimm_params[i].tckmin_x_minus_1_ps;
				if (mclk_ps >= tckmin_x_minus_1_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_1_ps of %u\n",
					      temp2, i, mclk_ps,
					      tckmin_x_minus_1_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_x_minus_2 == temp2) {
				unsigned int tckmin_x_minus_2_ps
					= dimm_params[i].tckmin_x_minus_2_ps;
				if (mclk_ps >= tckmin_x_minus_2_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_2_ps of %u\n",
					      temp2, i, mclk_ps,
					      tckmin_x_minus_2_ps);
					continue;
				} else {
					not_ok++;
				}
			}
		}

		if (!not_ok)
			lowest_good_caslat = temp2;

		temp1 &= ~(1 << temp2);
	}

	debug("lowest common SPD-defined CAS latency = %u\n",
	      lowest_good_caslat);
	outpdimm->lowest_common_spd_caslat = lowest_good_caslat;

	/*
	 * Compute a common 'de-rated' CAS latency.
	 *
	 * The strategy here is to find the *highest* derated CAS latency,
	 * with the assumption that all of the DIMMs will support a derated
	 * CAS latency higher than or equal to their lowest derated value.
	 */
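	/*
	 * For example (assumed values): if DIMM 0's lowest derated CL is 4
	 * and DIMM 1's is 5, the loop below picks 5, which both DIMMs can
	 * run at under the derating assumption above.
	 */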
	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++)
		temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);

	outpdimm->highest_common_derated_caslat = temp1;
	debug("highest common derated CAS latency = %u\n", temp1);

	return 0;
}
#endif

/*
 * compute_lowest_common_dimm_parameters()
 *
 * Determine the worst-case DIMM timing parameters from the set of DIMMs
 * whose parameters have been computed into the array pointed to
 * by dimm_params.
 */
unsigned int
compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
				      common_timing_params_t *outpdimm,
				      const unsigned int number_of_dimms)
{
	unsigned int i, j;

	unsigned int tckmin_x_ps = 0;
	unsigned int tckmax_ps = 0xFFFFFFFF;
	unsigned int trcd_ps = 0;
	unsigned int trp_ps = 0;
	unsigned int tras_ps = 0;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	unsigned int taamin_ps = 0;
#endif
#ifdef CONFIG_SYS_FSL_DDR4
	unsigned int twr_ps = 15000;
	unsigned int trfc1_ps = 0;
	unsigned int trfc2_ps = 0;
	unsigned int trfc4_ps = 0;
	unsigned int trrds_ps = 0;
	unsigned int trrdl_ps = 0;
	unsigned int tccdl_ps = 0;
#else
	unsigned int twr_ps = 0;
	unsigned int twtr_ps = 0;
	unsigned int trfc_ps = 0;
	unsigned int trrd_ps = 0;
	unsigned int trtp_ps = 0;
#endif
	unsigned int trc_ps = 0;
	unsigned int refresh_rate_ps = 0;
	unsigned int extended_op_srt = 1;
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	unsigned int tis_ps = 0;
	unsigned int tih_ps = 0;
	unsigned int tds_ps = 0;
	unsigned int tdh_ps = 0;
	unsigned int tdqsq_max_ps = 0;
	unsigned int tqhs_ps = 0;
#endif
	unsigned int temp1, temp2;
	unsigned int additive_latency = 0;

	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		/*
		 * If there are no ranks on this DIMM,
		 * it probably doesn't exist, so skip it.
		 */
		if (dimm_params[i].n_ranks == 0) {
			temp1++;
			continue;
		}
		if (dimm_params[i].n_ranks == 4 && i != 0) {
			printf("Found quad-rank DIMM in wrong bank, ignored."
			       " Software may not run as expected.\n");
			temp1++;
			continue;
		}

		/*
		 * Check whether a quad-rank DIMM is plugged in when
		 * CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE is not defined.
		 * Only boards with the proper design can support it.
		 */
#ifndef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
		if (dimm_params[i].n_ranks == 4 &&
		    CONFIG_CHIP_SELECTS_PER_CTRL/CONFIG_DIMM_SLOTS_PER_CTLR < 4) {
			printf("Found quad-rank DIMM, not supported.\n");
			temp1++;
			continue;
		}
#endif
		/*
		 * Find minimum tckmax_ps to find fastest slow speed,
		 * i.e., this is the slowest the whole system can go.
		 */
		tckmax_ps = min(tckmax_ps, dimm_params[i].tckmax_ps);
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
		taamin_ps = max(taamin_ps, dimm_params[i].taa_ps);
#endif
		tckmin_x_ps = max(tckmin_x_ps, dimm_params[i].tckmin_x_ps);
		trcd_ps = max(trcd_ps, dimm_params[i].trcd_ps);
		trp_ps = max(trp_ps, dimm_params[i].trp_ps);
		tras_ps = max(tras_ps, dimm_params[i].tras_ps);
#ifdef CONFIG_SYS_FSL_DDR4
		trfc1_ps = max(trfc1_ps, dimm_params[i].trfc1_ps);
		trfc2_ps = max(trfc2_ps, dimm_params[i].trfc2_ps);
		trfc4_ps = max(trfc4_ps, dimm_params[i].trfc4_ps);
		trrds_ps = max(trrds_ps, dimm_params[i].trrds_ps);
		trrdl_ps = max(trrdl_ps, dimm_params[i].trrdl_ps);
		tccdl_ps = max(tccdl_ps, dimm_params[i].tccdl_ps);
#else
		twr_ps = max(twr_ps, dimm_params[i].twr_ps);
		twtr_ps = max(twtr_ps, dimm_params[i].twtr_ps);
		trfc_ps = max(trfc_ps, dimm_params[i].trfc_ps);
		trrd_ps = max(trrd_ps, dimm_params[i].trrd_ps);
		trtp_ps = max(trtp_ps, dimm_params[i].trtp_ps);
#endif
		trc_ps = max(trc_ps, dimm_params[i].trc_ps);
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
		tis_ps = max(tis_ps, dimm_params[i].tis_ps);
		tih_ps = max(tih_ps, dimm_params[i].tih_ps);
		tds_ps = max(tds_ps, dimm_params[i].tds_ps);
		tdh_ps = max(tdh_ps, dimm_params[i].tdh_ps);
		tqhs_ps = max(tqhs_ps, dimm_params[i].tqhs_ps);
		/*
		 * Find maximum tdqsq_max_ps to find slowest.
		 *
		 * FIXME: is finding the slowest value the correct
		 * strategy for this parameter?
		 */
		tdqsq_max_ps = max(tdqsq_max_ps, dimm_params[i].tdqsq_max_ps);
#endif
		refresh_rate_ps = max(refresh_rate_ps,
				      dimm_params[i].refresh_rate_ps);
		/* extended_op_srt is either 0 or 1, 0 having priority */
		extended_op_srt = min(extended_op_srt,
				      dimm_params[i].extended_op_srt);
	}

	outpdimm->ndimms_present = number_of_dimms - temp1;

	if (temp1 == number_of_dimms) {
		debug("no DIMMs on this memory controller\n");
		return 0;
	}

	outpdimm->tckmin_x_ps = tckmin_x_ps;
	outpdimm->tckmax_ps = tckmax_ps;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	outpdimm->taamin_ps = taamin_ps;
#endif
	outpdimm->trcd_ps = trcd_ps;
	outpdimm->trp_ps = trp_ps;
	outpdimm->tras_ps = tras_ps;
#ifdef CONFIG_SYS_FSL_DDR4
	outpdimm->trfc1_ps = trfc1_ps;
	outpdimm->trfc2_ps = trfc2_ps;
	outpdimm->trfc4_ps = trfc4_ps;
	outpdimm->trrds_ps = trrds_ps;
	outpdimm->trrdl_ps = trrdl_ps;
	outpdimm->tccdl_ps = tccdl_ps;
#else
	outpdimm->twtr_ps = twtr_ps;
	outpdimm->trfc_ps = trfc_ps;
	outpdimm->trrd_ps = trrd_ps;
	outpdimm->trtp_ps = trtp_ps;
#endif
	outpdimm->twr_ps = twr_ps;
	outpdimm->trc_ps = trc_ps;
	outpdimm->refresh_rate_ps = refresh_rate_ps;
	outpdimm->extended_op_srt = extended_op_srt;
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	outpdimm->tis_ps = tis_ps;
	outpdimm->tih_ps = tih_ps;
	outpdimm->tds_ps = tds_ps;
	outpdimm->tdh_ps = tdh_ps;
	outpdimm->tdqsq_max_ps = tdqsq_max_ps;
	outpdimm->tqhs_ps = tqhs_ps;
#endif

	/* Determine common burst length for all DIMMs. */
	temp1 = 0xff;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp1 &= dimm_params[i].burst_lengths_bitmask;
		}
	}
	outpdimm->all_dimms_burst_lengths_bitmask = temp1;

	/* Determine if all DIMMs are registered or all are unbuffered. */
	temp1 = temp2 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			if (dimm_params[i].registered_dimm) {
				temp1 = 1;
#ifndef CONFIG_SPL_BUILD
				printf("Detected RDIMM %s\n",
				       dimm_params[i].mpart);
#endif
			} else {
				temp2 = 1;
#ifndef CONFIG_SPL_BUILD
				printf("Detected UDIMM %s\n",
				       dimm_params[i].mpart);
#endif
			}
		}
	}

	outpdimm->all_dimms_registered = 0;
	outpdimm->all_dimms_unbuffered = 0;
	if (temp1 && !temp2) {
		outpdimm->all_dimms_registered = 1;
	} else if (!temp1 && temp2) {
		outpdimm->all_dimms_unbuffered = 1;
	} else {
		printf("ERROR: Mix of registered and unbuffered "
		       "DIMMs detected!\n");
	}

	temp1 = 0;
	if (outpdimm->all_dimms_registered)
		for (j = 0; j < 16; j++) {
			outpdimm->rcw[j] = dimm_params[0].rcw[j];
			for (i = 1; i < number_of_dimms; i++) {
				if (!dimm_params[i].n_ranks)
					continue;
				if (dimm_params[i].rcw[j] != dimm_params[0].rcw[j]) {
					temp1 = 1;
					break;
				}
			}
		}

	if (temp1 != 0)
		printf("ERROR: Mix of different RDIMMs detected!\n");

	/* Calculate CAS latency for all DDR types */
	if (compute_cas_latency(dimm_params, outpdimm, number_of_dimms))
		return 1;

	/* Determine if all DIMMs are ECC capable. */
	temp1 = 1;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks &&
		    !(dimm_params[i].edc_config & EDC_ECC)) {
			temp1 = 0;
			break;
		}
	}
	if (temp1) {
		debug("all DIMMs ECC capable\n");
	} else {
		debug("Warning: not all DIMMs ECC capable, can't enable ECC\n");
	}
	outpdimm->all_dimms_ecc_capable = temp1;

	/*
	 * Compute additive latency.
	 *
	 * For DDR1, additive latency should be 0.
	 *
	 * For DDR2, with ODT enabled, use a value less than ACTTORW,
	 * which comes from tRCD, and also note that:
	 *	add_lat + caslat must be >= 4
	 *
	 * For DDR3, we use AL=0.
	 *
	 * When to use additive latency for DDR2:
	 *
	 * I. Because you are using CL=3 and need to do ODT on writes and
	 *    want functionality.
	 *    1. Are you going to use ODT? (Does your board not have
	 *       additional termination circuitry for DQ, DQS, DQS_,
	 *       DM, RDQS, RDQS_ for x4/x8 configs?)
	 *    2. If so, is your lowest supported CL going to be 3?
	 *    3. If so, then you must set AL=1 because
	 *
	 *       WL >= 3 for ODT on writes
	 *       RL = AL + CL
	 *       WL = RL - 1
	 *       ->
	 *       WL = AL + CL - 1
	 *       AL + CL - 1 >= 3
	 *       AL + CL >= 4
	 *       QED
	 *
	 *       RL >= 3 for ODT on reads
	 *       RL = AL + CL
	 *
	 *       Since CL is not usually less than 2, AL=0 is a minimum,
	 *       so the WL-derived AL should be the -- FIXME?
	 *
	 * II. Because you are using auto-precharge globally and want to
	 *     use additive latency (posted CAS) to get more bandwidth.
	 *     1. Are you going to use auto-precharge mode globally?
	 *
	 *        Use additive latency and compute AL to be 1 cycle less than
	 *        tRCD, i.e. the READ or WRITE command is in the cycle
	 *        immediately following the ACTIVATE command.
	 *
	 * III. Because you feel like it or want to do some sort of
	 *      degraded-performance experiment.
	 *      1. Do you just want to use additive latency because you feel
	 *         like it?
	 *
	 * Validation: AL is less than tRCD, and within the other
	 * read-to-precharge constraints.
	 */

	additive_latency = 0;

#if defined(CONFIG_SYS_FSL_DDR2)
	if ((outpdimm->lowest_common_spd_caslat < 4) &&
	    (picos_to_mclk(trcd_ps) > outpdimm->lowest_common_spd_caslat)) {
		additive_latency = picos_to_mclk(trcd_ps) -
				   outpdimm->lowest_common_spd_caslat;
		if (mclk_to_picos(additive_latency) > trcd_ps) {
			additive_latency = picos_to_mclk(trcd_ps);
			debug("setting additive_latency to %u because it was "
			      "greater than tRCD_ps\n", additive_latency);
		}
	}
#endif
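
	/*
	 * Illustrative DDR2 example (assumed numbers): with trcd_ps = 15000
	 * and a 5000 ps clock (DDR2-400), picos_to_mclk(trcd_ps) is 3. With
	 * a common CL of 3 the condition above is false and AL stays 0;
	 * with a common CL of 2 it would yield AL = 3 - 2 = 1.
	 */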

	/*
	 * Validate additive latency:
	 *
	 *	AL <= tRCD(min)
	 */
	if (mclk_to_picos(additive_latency) > trcd_ps) {
		printf("Error: invalid additive latency exceeds tRCD(min).\n");
		return 1;
	}

	/*
	 * RL = CL + AL;  RL >= 3 for ODT_RD_CFG to be enabled
	 * WL = RL - 1;   WL >= 3 for ODT_WL_CFG to be enabled
	 * ADD_LAT (the register) must be set to a value less
	 * than ACTTORW; if WL = 1, then AL must be set to 1.
	 * RD_TO_PRE (the register) must be set to a minimum of
	 * tRTP + AL if AL is nonzero.
	 */

	/*
	 * Additive latency will be applied only if the memctl option to
	 * use it is set.
	 */
	outpdimm->additive_latency = additive_latency;

	debug("tCKmin_ps = %u\n", outpdimm->tckmin_x_ps);
	debug("trcd_ps = %u\n", outpdimm->trcd_ps);
	debug("trp_ps = %u\n", outpdimm->trp_ps);
	debug("tras_ps = %u\n", outpdimm->tras_ps);
#ifdef CONFIG_SYS_FSL_DDR4
	debug("trfc1_ps = %u\n", trfc1_ps);
	debug("trfc2_ps = %u\n", trfc2_ps);
	debug("trfc4_ps = %u\n", trfc4_ps);
	debug("trrds_ps = %u\n", trrds_ps);
	debug("trrdl_ps = %u\n", trrdl_ps);
	debug("tccdl_ps = %u\n", tccdl_ps);
#else
	debug("twtr_ps = %u\n", outpdimm->twtr_ps);
	debug("trfc_ps = %u\n", outpdimm->trfc_ps);
	debug("trrd_ps = %u\n", outpdimm->trrd_ps);
#endif
	debug("twr_ps = %u\n", outpdimm->twr_ps);
	debug("trc_ps = %u\n", outpdimm->trc_ps);

	return 0;
}