/*
 * Copyright 2008-2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0
 */
8 #include <fsl_ddr_sdram.h>
12 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
14 compute_cas_latency(const unsigned int ctrl_num
,
15 const dimm_params_t
*dimm_params
,
16 common_timing_params_t
*outpdimm
,
17 unsigned int number_of_dimms
)
20 unsigned int common_caslat
;
21 unsigned int caslat_actual
;
22 unsigned int retry
= 16;
23 unsigned int tmp
= ~0;
24 const unsigned int mclk_ps
= get_memory_clk_period_ps(ctrl_num
);
25 #ifdef CONFIG_SYS_FSL_DDR3
26 const unsigned int taamax
= 20000;
28 const unsigned int taamax
= 18000;
31 /* compute the common CAS latency supported between slots */
32 for (i
= 0; i
< number_of_dimms
; i
++) {
33 if (dimm_params
[i
].n_ranks
)
34 tmp
&= dimm_params
[i
].caslat_x
;
38 /* validate if the memory clk is in the range of dimms */
39 if (mclk_ps
< outpdimm
->tckmin_x_ps
) {
40 printf("DDR clock (MCLK cycle %u ps) is faster than "
41 "the slowest DIMM(s) (tCKmin %u ps) can support.\n",
42 mclk_ps
, outpdimm
->tckmin_x_ps
);
44 #ifdef CONFIG_SYS_FSL_DDR4
45 if (mclk_ps
> outpdimm
->tckmax_ps
) {
46 printf("DDR clock (MCLK cycle %u ps) is slower than DIMM(s) (tCKmax %u ps) can support.\n",
47 mclk_ps
, outpdimm
->tckmax_ps
);
50 /* determine the acutal cas latency */
51 caslat_actual
= (outpdimm
->taamin_ps
+ mclk_ps
- 1) / mclk_ps
;
52 /* check if the dimms support the CAS latency */
53 while (!(common_caslat
& (1 << caslat_actual
)) && retry
> 0) {
57 /* once the caculation of caslat_actual is completed
58 * we must verify that this CAS latency value does not
59 * exceed tAAmax, which is 20 ns for all DDR3 speed grades,
60 * 18ns for all DDR4 speed grades.
62 if (caslat_actual
* mclk_ps
> taamax
) {
63 printf("The choosen cas latency %d is too large\n",
66 outpdimm
->lowest_common_spd_caslat
= caslat_actual
;
67 debug("lowest_common_spd_caslat is 0x%x\n", caslat_actual
);
71 #else /* for DDR1 and DDR2 */
73 compute_cas_latency(const unsigned int ctrl_num
,
74 const dimm_params_t
*dimm_params
,
75 common_timing_params_t
*outpdimm
,
76 unsigned int number_of_dimms
)
79 const unsigned int mclk_ps
= get_memory_clk_period_ps(ctrl_num
);
80 unsigned int lowest_good_caslat
;
82 unsigned int temp1
, temp2
;
84 debug("using mclk_ps = %u\n", mclk_ps
);
85 if (mclk_ps
> outpdimm
->tckmax_ps
) {
86 printf("Warning: DDR clock (%u ps) is slower than DIMM(s) (tCKmax %u ps)\n",
87 mclk_ps
, outpdimm
->tckmax_ps
);
91 * Compute a CAS latency suitable for all DIMMs
93 * Strategy for SPD-defined latencies: compute only
94 * CAS latency defined by all DIMMs.
98 * Step 1: find CAS latency common to all DIMMs using bitwise
102 for (i
= 0; i
< number_of_dimms
; i
++) {
103 if (dimm_params
[i
].n_ranks
) {
105 temp2
|= 1 << dimm_params
[i
].caslat_x
;
106 temp2
|= 1 << dimm_params
[i
].caslat_x_minus_1
;
107 temp2
|= 1 << dimm_params
[i
].caslat_x_minus_2
;
109 * If there was no entry for X-2 (X-1) in
110 * the SPD, then caslat_x_minus_2
111 * (caslat_x_minus_1) contains either 255 or
112 * 0xFFFFFFFF because that's what the glorious
113 * __ilog2 function returns for an input of 0.
114 * On 32-bit PowerPC, left shift counts with bit
115 * 26 set (that the value of 255 or 0xFFFFFFFF
116 * will have), cause the destination register to
117 * be 0. That is why this works.
124 * Step 2: check each common CAS latency against tCK of each
127 lowest_good_caslat
= 0;
131 temp2
= __ilog2(temp1
);
132 debug("checking common caslat = %u\n", temp2
);
134 /* Check if this CAS latency will work on all DIMMs at tCK. */
135 for (i
= 0; i
< number_of_dimms
; i
++) {
136 if (!dimm_params
[i
].n_ranks
)
139 if (dimm_params
[i
].caslat_x
== temp2
) {
140 if (mclk_ps
>= dimm_params
[i
].tckmin_x_ps
) {
141 debug("CL = %u ok on DIMM %u at tCK=%u ps with tCKmin_X_ps of %u\n",
143 dimm_params
[i
].tckmin_x_ps
);
150 if (dimm_params
[i
].caslat_x_minus_1
== temp2
) {
151 unsigned int tckmin_x_minus_1_ps
152 = dimm_params
[i
].tckmin_x_minus_1_ps
;
153 if (mclk_ps
>= tckmin_x_minus_1_ps
) {
154 debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_1_ps of %u\n",
156 tckmin_x_minus_1_ps
);
163 if (dimm_params
[i
].caslat_x_minus_2
== temp2
) {
164 unsigned int tckmin_x_minus_2_ps
165 = dimm_params
[i
].tckmin_x_minus_2_ps
;
166 if (mclk_ps
>= tckmin_x_minus_2_ps
) {
167 debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_2_ps of %u\n",
169 tckmin_x_minus_2_ps
);
178 lowest_good_caslat
= temp2
;
180 temp1
&= ~(1 << temp2
);
183 debug("lowest common SPD-defined CAS latency = %u\n",
185 outpdimm
->lowest_common_spd_caslat
= lowest_good_caslat
;
189 * Compute a common 'de-rated' CAS latency.
191 * The strategy here is to find the *highest* dereated cas latency
192 * with the assumption that all of the DIMMs will support a dereated
193 * CAS latency higher than or equal to their lowest dereated value.
196 for (i
= 0; i
< number_of_dimms
; i
++)
197 temp1
= max(temp1
, dimm_params
[i
].caslat_lowest_derated
);
199 outpdimm
->highest_common_derated_caslat
= temp1
;
200 debug("highest common dereated CAS latency = %u\n", temp1
);
/*
 * compute_lowest_common_dimm_parameters()
 *
 * Determine the worst-case DIMM timing parameters from the set of DIMMs
 * whose parameters have been computed into the array pointed to
 * by dimm_params.
 */
214 compute_lowest_common_dimm_parameters(const unsigned int ctrl_num
,
215 const dimm_params_t
*dimm_params
,
216 common_timing_params_t
*outpdimm
,
217 const unsigned int number_of_dimms
)
221 unsigned int tckmin_x_ps
= 0;
222 unsigned int tckmax_ps
= 0xFFFFFFFF;
223 unsigned int trcd_ps
= 0;
224 unsigned int trp_ps
= 0;
225 unsigned int tras_ps
= 0;
226 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
227 unsigned int taamin_ps
= 0;
229 #ifdef CONFIG_SYS_FSL_DDR4
230 unsigned int twr_ps
= 15000;
231 unsigned int trfc1_ps
= 0;
232 unsigned int trfc2_ps
= 0;
233 unsigned int trfc4_ps
= 0;
234 unsigned int trrds_ps
= 0;
235 unsigned int trrdl_ps
= 0;
236 unsigned int tccdl_ps
= 0;
238 unsigned int twr_ps
= 0;
239 unsigned int twtr_ps
= 0;
240 unsigned int trfc_ps
= 0;
241 unsigned int trrd_ps
= 0;
242 unsigned int trtp_ps
= 0;
244 unsigned int trc_ps
= 0;
245 unsigned int refresh_rate_ps
= 0;
246 unsigned int extended_op_srt
= 1;
247 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
248 unsigned int tis_ps
= 0;
249 unsigned int tih_ps
= 0;
250 unsigned int tds_ps
= 0;
251 unsigned int tdh_ps
= 0;
252 unsigned int tdqsq_max_ps
= 0;
253 unsigned int tqhs_ps
= 0;
255 unsigned int temp1
, temp2
;
256 unsigned int additive_latency
= 0;
259 for (i
= 0; i
< number_of_dimms
; i
++) {
261 * If there are no ranks on this DIMM,
262 * it probably doesn't exist, so skip it.
264 if (dimm_params
[i
].n_ranks
== 0) {
268 if (dimm_params
[i
].n_ranks
== 4 && i
!= 0) {
269 printf("Found Quad-rank DIMM in wrong bank, ignored."
270 " Software may not run as expected.\n");
276 * check if quad-rank DIMM is plugged if
277 * CONFIG_CHIP_SELECT_QUAD_CAPABLE is not defined
278 * Only the board with proper design is capable
280 #ifndef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
281 if (dimm_params
[i
].n_ranks
== 4 && \
282 CONFIG_CHIP_SELECTS_PER_CTRL
/CONFIG_DIMM_SLOTS_PER_CTLR
< 4) {
283 printf("Found Quad-rank DIMM, not able to support.");
289 * Find minimum tckmax_ps to find fastest slow speed,
290 * i.e., this is the slowest the whole system can go.
292 tckmax_ps
= min(tckmax_ps
,
293 (unsigned int)dimm_params
[i
].tckmax_ps
);
294 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
295 taamin_ps
= max(taamin_ps
,
296 (unsigned int)dimm_params
[i
].taa_ps
);
298 tckmin_x_ps
= max(tckmin_x_ps
,
299 (unsigned int)dimm_params
[i
].tckmin_x_ps
);
300 trcd_ps
= max(trcd_ps
, (unsigned int)dimm_params
[i
].trcd_ps
);
301 trp_ps
= max(trp_ps
, (unsigned int)dimm_params
[i
].trp_ps
);
302 tras_ps
= max(tras_ps
, (unsigned int)dimm_params
[i
].tras_ps
);
303 #ifdef CONFIG_SYS_FSL_DDR4
304 trfc1_ps
= max(trfc1_ps
,
305 (unsigned int)dimm_params
[i
].trfc1_ps
);
306 trfc2_ps
= max(trfc2_ps
,
307 (unsigned int)dimm_params
[i
].trfc2_ps
);
308 trfc4_ps
= max(trfc4_ps
,
309 (unsigned int)dimm_params
[i
].trfc4_ps
);
310 trrds_ps
= max(trrds_ps
,
311 (unsigned int)dimm_params
[i
].trrds_ps
);
312 trrdl_ps
= max(trrdl_ps
,
313 (unsigned int)dimm_params
[i
].trrdl_ps
);
314 tccdl_ps
= max(tccdl_ps
,
315 (unsigned int)dimm_params
[i
].tccdl_ps
);
317 twr_ps
= max(twr_ps
, (unsigned int)dimm_params
[i
].twr_ps
);
318 twtr_ps
= max(twtr_ps
, (unsigned int)dimm_params
[i
].twtr_ps
);
319 trfc_ps
= max(trfc_ps
, (unsigned int)dimm_params
[i
].trfc_ps
);
320 trrd_ps
= max(trrd_ps
, (unsigned int)dimm_params
[i
].trrd_ps
);
321 trtp_ps
= max(trtp_ps
, (unsigned int)dimm_params
[i
].trtp_ps
);
323 trc_ps
= max(trc_ps
, (unsigned int)dimm_params
[i
].trc_ps
);
324 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
325 tis_ps
= max(tis_ps
, (unsigned int)dimm_params
[i
].tis_ps
);
326 tih_ps
= max(tih_ps
, (unsigned int)dimm_params
[i
].tih_ps
);
327 tds_ps
= max(tds_ps
, (unsigned int)dimm_params
[i
].tds_ps
);
328 tdh_ps
= max(tdh_ps
, (unsigned int)dimm_params
[i
].tdh_ps
);
329 tqhs_ps
= max(tqhs_ps
, (unsigned int)dimm_params
[i
].tqhs_ps
);
331 * Find maximum tdqsq_max_ps to find slowest.
333 * FIXME: is finding the slowest value the correct
334 * strategy for this parameter?
336 tdqsq_max_ps
= max(tdqsq_max_ps
,
337 (unsigned int)dimm_params
[i
].tdqsq_max_ps
);
339 refresh_rate_ps
= max(refresh_rate_ps
,
340 (unsigned int)dimm_params
[i
].refresh_rate_ps
);
341 /* extended_op_srt is either 0 or 1, 0 having priority */
342 extended_op_srt
= min(extended_op_srt
,
343 (unsigned int)dimm_params
[i
].extended_op_srt
);
346 outpdimm
->ndimms_present
= number_of_dimms
- temp1
;
348 if (temp1
== number_of_dimms
) {
349 debug("no dimms this memory controller\n");
353 outpdimm
->tckmin_x_ps
= tckmin_x_ps
;
354 outpdimm
->tckmax_ps
= tckmax_ps
;
355 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
356 outpdimm
->taamin_ps
= taamin_ps
;
358 outpdimm
->trcd_ps
= trcd_ps
;
359 outpdimm
->trp_ps
= trp_ps
;
360 outpdimm
->tras_ps
= tras_ps
;
361 #ifdef CONFIG_SYS_FSL_DDR4
362 outpdimm
->trfc1_ps
= trfc1_ps
;
363 outpdimm
->trfc2_ps
= trfc2_ps
;
364 outpdimm
->trfc4_ps
= trfc4_ps
;
365 outpdimm
->trrds_ps
= trrds_ps
;
366 outpdimm
->trrdl_ps
= trrdl_ps
;
367 outpdimm
->tccdl_ps
= tccdl_ps
;
369 outpdimm
->twtr_ps
= twtr_ps
;
370 outpdimm
->trfc_ps
= trfc_ps
;
371 outpdimm
->trrd_ps
= trrd_ps
;
372 outpdimm
->trtp_ps
= trtp_ps
;
374 outpdimm
->twr_ps
= twr_ps
;
375 outpdimm
->trc_ps
= trc_ps
;
376 outpdimm
->refresh_rate_ps
= refresh_rate_ps
;
377 outpdimm
->extended_op_srt
= extended_op_srt
;
378 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
379 outpdimm
->tis_ps
= tis_ps
;
380 outpdimm
->tih_ps
= tih_ps
;
381 outpdimm
->tds_ps
= tds_ps
;
382 outpdimm
->tdh_ps
= tdh_ps
;
383 outpdimm
->tdqsq_max_ps
= tdqsq_max_ps
;
384 outpdimm
->tqhs_ps
= tqhs_ps
;
387 /* Determine common burst length for all DIMMs. */
389 for (i
= 0; i
< number_of_dimms
; i
++) {
390 if (dimm_params
[i
].n_ranks
) {
391 temp1
&= dimm_params
[i
].burst_lengths_bitmask
;
394 outpdimm
->all_dimms_burst_lengths_bitmask
= temp1
;
396 /* Determine if all DIMMs registered buffered. */
398 for (i
= 0; i
< number_of_dimms
; i
++) {
399 if (dimm_params
[i
].n_ranks
) {
400 if (dimm_params
[i
].registered_dimm
) {
402 #ifndef CONFIG_SPL_BUILD
403 printf("Detected RDIMM %s\n",
404 dimm_params
[i
].mpart
);
408 #ifndef CONFIG_SPL_BUILD
409 printf("Detected UDIMM %s\n",
410 dimm_params
[i
].mpart
);
416 outpdimm
->all_dimms_registered
= 0;
417 outpdimm
->all_dimms_unbuffered
= 0;
418 if (temp1
&& !temp2
) {
419 outpdimm
->all_dimms_registered
= 1;
420 } else if (!temp1
&& temp2
) {
421 outpdimm
->all_dimms_unbuffered
= 1;
423 printf("ERROR: Mix of registered buffered and unbuffered "
424 "DIMMs detected!\n");
428 if (outpdimm
->all_dimms_registered
)
429 for (j
= 0; j
< 16; j
++) {
430 outpdimm
->rcw
[j
] = dimm_params
[0].rcw
[j
];
431 for (i
= 1; i
< number_of_dimms
; i
++) {
432 if (!dimm_params
[i
].n_ranks
)
434 if (dimm_params
[i
].rcw
[j
] != dimm_params
[0].rcw
[j
]) {
442 printf("ERROR: Mix different RDIMM detected!\n");
444 /* calculate cas latency for all DDR types */
445 if (compute_cas_latency(ctrl_num
, dimm_params
,
446 outpdimm
, number_of_dimms
))
449 /* Determine if all DIMMs ECC capable. */
451 for (i
= 0; i
< number_of_dimms
; i
++) {
452 if (dimm_params
[i
].n_ranks
&&
453 !(dimm_params
[i
].edc_config
& EDC_ECC
)) {
459 debug("all DIMMs ECC capable\n");
461 debug("Warning: not all DIMMs ECC capable, cant enable ECC\n");
463 outpdimm
->all_dimms_ecc_capable
= temp1
;
466 * Compute additive latency.
468 * For DDR1, additive latency should be 0.
470 * For DDR2, with ODT enabled, use "a value" less than ACTTORW,
471 * which comes from Trcd, and also note that:
472 * add_lat + caslat must be >= 4
474 * For DDR3, we use the AL=0
476 * When to use additive latency for DDR2:
478 * I. Because you are using CL=3 and need to do ODT on writes and
479 * want functionality.
480 * 1. Are you going to use ODT? (Does your board not have
481 * additional termination circuitry for DQ, DQS, DQS_,
482 * DM, RDQS, RDQS_ for x4/x8 configs?)
483 * 2. If so, is your lowest supported CL going to be 3?
484 * 3. If so, then you must set AL=1 because
486 * WL >= 3 for ODT on writes
495 * RL >= 3 for ODT on reads
498 * Since CL aren't usually less than 2, AL=0 is a minimum,
499 * so the WL-derived AL should be the -- FIXME?
501 * II. Because you are using auto-precharge globally and want to
502 * use additive latency (posted CAS) to get more bandwidth.
503 * 1. Are you going to use auto-precharge mode globally?
505 * Use addtivie latency and compute AL to be 1 cycle less than
506 * tRCD, i.e. the READ or WRITE command is in the cycle
507 * immediately following the ACTIVATE command..
509 * III. Because you feel like it or want to do some sort of
510 * degraded-performance experiment.
511 * 1. Do you just want to use additive latency because you feel
514 * Validation: AL is less than tRCD, and within the other
515 * read-to-precharge constraints.
518 additive_latency
= 0;
520 #if defined(CONFIG_SYS_FSL_DDR2)
521 if ((outpdimm
->lowest_common_spd_caslat
< 4) &&
522 (picos_to_mclk(ctrl_num
, trcd_ps
) >
523 outpdimm
->lowest_common_spd_caslat
)) {
524 additive_latency
= picos_to_mclk(ctrl_num
, trcd_ps
) -
525 outpdimm
->lowest_common_spd_caslat
;
526 if (mclk_to_picos(ctrl_num
, additive_latency
) > trcd_ps
) {
527 additive_latency
= picos_to_mclk(ctrl_num
, trcd_ps
);
528 debug("setting additive_latency to %u because it was "
529 " greater than tRCD_ps\n", additive_latency
);
535 * Validate additive latency
539 if (mclk_to_picos(ctrl_num
, additive_latency
) > trcd_ps
) {
540 printf("Error: invalid additive latency exceeds tRCD(min).\n");
545 * RL = CL + AL; RL >= 3 for ODT_RD_CFG to be enabled
546 * WL = RL - 1; WL >= 3 for ODT_WL_CFG to be enabled
547 * ADD_LAT (the register) must be set to a value less
548 * than ACTTORW if WL = 1, then AL must be set to 1
549 * RD_TO_PRE (the register) must be set to a minimum
550 * tRTP + AL if AL is nonzero
554 * Additive latency will be applied only if the memctl option to
557 outpdimm
->additive_latency
= additive_latency
;
559 debug("tCKmin_ps = %u\n", outpdimm
->tckmin_x_ps
);
560 debug("trcd_ps = %u\n", outpdimm
->trcd_ps
);
561 debug("trp_ps = %u\n", outpdimm
->trp_ps
);
562 debug("tras_ps = %u\n", outpdimm
->tras_ps
);
563 #ifdef CONFIG_SYS_FSL_DDR4
564 debug("trfc1_ps = %u\n", trfc1_ps
);
565 debug("trfc2_ps = %u\n", trfc2_ps
);
566 debug("trfc4_ps = %u\n", trfc4_ps
);
567 debug("trrds_ps = %u\n", trrds_ps
);
568 debug("trrdl_ps = %u\n", trrdl_ps
);
569 debug("tccdl_ps = %u\n", tccdl_ps
);
571 debug("twtr_ps = %u\n", outpdimm
->twtr_ps
);
572 debug("trfc_ps = %u\n", outpdimm
->trfc_ps
);
573 debug("trrd_ps = %u\n", outpdimm
->trrd_ps
);
575 debug("twr_ps = %u\n", outpdimm
->twr_ps
);
576 debug("trc_ps = %u\n", outpdimm
->trc_ps
);