1 /*
2 * Copyright Altera Corporation (C) 2012-2015
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <common.h>
8 #include <asm/io.h>
9 #include <asm/arch/sdram.h>
10 #include <errno.h>
11 #include "sequencer.h"
12
13 static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
14 (struct socfpga_sdr_rw_load_manager *)
15 (SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
16 static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
17 (struct socfpga_sdr_rw_load_jump_manager *)
18 (SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
19 static struct socfpga_sdr_reg_file *sdr_reg_file =
20 (struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
21 static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
22 (struct socfpga_sdr_scc_mgr *)
23 (SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
24 static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
25 (struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
26 static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
27 (struct socfpga_phy_mgr_cfg *)
28 (SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
29 static struct socfpga_data_mgr *data_mgr =
30 (struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
31 static struct socfpga_sdr_ctrl *sdr_ctrl =
32 (struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
33
34 const struct socfpga_sdram_rw_mgr_config *rwcfg;
35 const struct socfpga_sdram_io_config *iocfg;
36 const struct socfpga_sdram_misc_config *misccfg;
37
38 #define DELTA_D 1
39
40 /*
41 * In order to reduce ROM size, most of the selectable calibration steps are
42 * decided at compile time based on the user's calibration mode selection,
43 * as captured by the STATIC_CALIB_STEPS selection below.
44 *
45 * However, to support simulation-time selection of fast simulation mode, where
46 * we skip everything except the bare minimum, we need a few of the steps to
47 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
48 * check, which is based on the rtl-supplied value, or we dynamically compute
49  * the value to use based on the dynamically-chosen calibration mode.
50 */
51
52 #define DLEVEL 0
53 #define STATIC_IN_RTL_SIM 0
54 #define STATIC_SKIP_DELAY_LOOPS 0
55
56 #define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
57 STATIC_SKIP_DELAY_LOOPS)
58
59 /* calibration steps requested by the rtl */
60 u16 dyn_calib_steps;
61
62 /*
63 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
64 * instead of static, we use boolean logic to select between
65 * non-skip and skip values
66 *
67 * The mask is set to include all bits when not-skipping, but is
68 * zero when skipping
69 */
70
71 u16 skip_delay_mask; /* mask off bits when skipping/not-skipping */
72
73 #define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
74 ((non_skip_value) & skip_delay_mask)
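/*
 * Illustrative example (values hypothetical): with skip_delay_mask set to
 * 0xffff (not skipping), SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x6a) evaluates to
 * 0x6a and the delay loops run normally; with skip_delay_mask set to 0
 * (skipping), the same call yields 0 and the delay-loop counters are
 * effectively disabled.
 */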
75
76 struct gbl_type *gbl;
77 struct param_type *param;
78
79 static void set_failing_group_stage(u32 group, u32 stage,
80 u32 substage)
81 {
82 /*
83  * Only set the global stage if there has not been any other
84 * failing group
85 */
86 if (gbl->error_stage == CAL_STAGE_NIL) {
87 gbl->error_substage = substage;
88 gbl->error_stage = stage;
89 gbl->error_group = group;
90 }
91 }
92
93 static void reg_file_set_group(u16 set_group)
94 {
95 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
96 }
97
98 static void reg_file_set_stage(u8 set_stage)
99 {
100 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
101 }
102
103 static void reg_file_set_sub_stage(u8 set_sub_stage)
104 {
105 set_sub_stage &= 0xff;
106 clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
107 }
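/*
 * Note, inferred from the masks above: cur_stage packs the stage into bits
 * [7:0], the substage into bits [15:8] and the group into bits [31:16].
 * reg_file_set_stage() clears the whole lower half-word, so it also resets
 * the substage.
 */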
108
109 /**
110 * phy_mgr_initialize() - Initialize PHY Manager
111 *
112 * Initialize PHY Manager.
113 */
114 static void phy_mgr_initialize(void)
115 {
116 u32 ratio;
117
118 debug("%s:%d\n", __func__, __LINE__);
119 /* Calibration has control over path to memory */
120 /*
121 * In Hard PHY this is a 2-bit control:
122 * 0: AFI Mux Select
123 * 1: DDIO Mux Select
124 */
125 writel(0x3, &phy_mgr_cfg->mux_sel);
126
127 /* USER memory clock is not stable, we begin initialization */
128 writel(0, &phy_mgr_cfg->reset_mem_stbl);
129
130 /* USER calibration status all set to zero */
131 writel(0, &phy_mgr_cfg->cal_status);
132
133 writel(0, &phy_mgr_cfg->cal_debug_info);
134
135 /* Init params only if we do NOT skip calibration. */
136 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
137 return;
138
139 ratio = rwcfg->mem_dq_per_read_dqs /
140 rwcfg->mem_virtual_groups_per_read_dqs;
141 param->read_correct_mask_vg = (1 << ratio) - 1;
142 param->write_correct_mask_vg = (1 << ratio) - 1;
143 param->read_correct_mask = (1 << rwcfg->mem_dq_per_read_dqs) - 1;
144 param->write_correct_mask = (1 << rwcfg->mem_dq_per_write_dqs) - 1;
145 }
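/*
 * Illustrative example (widths hypothetical): with 8 DQ pins per read DQS
 * and 2 virtual groups per read DQS, ratio = 4, so the per-virtual-group
 * masks become 0xf, while read_correct_mask/write_correct_mask cover all
 * 8 DQ bits (0xff). The real widths come from the rwcfg tables.
 */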
146
147 /**
148 * set_rank_and_odt_mask() - Set Rank and ODT mask
149 * @rank: Rank mask
150 * @odt_mode: ODT mode, OFF or READ_WRITE
151 *
152 * Set Rank and ODT mask (On-Die Termination).
153 */
154 static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
155 {
156 u32 odt_mask_0 = 0;
157 u32 odt_mask_1 = 0;
158 u32 cs_and_odt_mask;
159
160 if (odt_mode == RW_MGR_ODT_MODE_OFF) {
161 odt_mask_0 = 0x0;
162 odt_mask_1 = 0x0;
163 } else { /* RW_MGR_ODT_MODE_READ_WRITE */
164 switch (rwcfg->mem_number_of_ranks) {
165 case 1: /* 1 Rank */
166 /* Read: ODT = 0 ; Write: ODT = 1 */
167 odt_mask_0 = 0x0;
168 odt_mask_1 = 0x1;
169 break;
170 case 2: /* 2 Ranks */
171 if (rwcfg->mem_number_of_cs_per_dimm == 1) {
172 /*
173 * - Dual-Slot , Single-Rank (1 CS per DIMM)
174 * OR
175 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
176 *
177 * Since MEM_NUMBER_OF_RANKS is 2, they
178 * are both single rank with 2 CS each
179 * (special for RDIMM).
180 *
181 * Read: Turn on ODT on the opposite rank
182 * Write: Turn on ODT on all ranks
183 */
184 odt_mask_0 = 0x3 & ~(1 << rank);
185 odt_mask_1 = 0x3;
186 } else {
187 /*
188 * - Single-Slot , Dual-Rank (2 CS per DIMM)
189 *
190  * Read: Turn off ODT on all ranks
191 * Write: Turn on ODT on active rank
192 */
193 odt_mask_0 = 0x0;
194 odt_mask_1 = 0x3 & (1 << rank);
195 }
196 break;
197 case 4: /* 4 Ranks */
198 /* Read:
199 * ----------+-----------------------+
200 * | ODT |
201 * Read From +-----------------------+
202 * Rank | 3 | 2 | 1 | 0 |
203 * ----------+-----+-----+-----+-----+
204 * 0 | 0 | 1 | 0 | 0 |
205 * 1 | 1 | 0 | 0 | 0 |
206 * 2 | 0 | 0 | 0 | 1 |
207 * 3 | 0 | 0 | 1 | 0 |
208 * ----------+-----+-----+-----+-----+
209 *
210 * Write:
211 * ----------+-----------------------+
212 * | ODT |
213 * Write To +-----------------------+
214 * Rank | 3 | 2 | 1 | 0 |
215 * ----------+-----+-----+-----+-----+
216 * 0 | 0 | 1 | 0 | 1 |
217 * 1 | 1 | 0 | 1 | 0 |
218 * 2 | 0 | 1 | 0 | 1 |
219 * 3 | 1 | 0 | 1 | 0 |
220 * ----------+-----+-----+-----+-----+
221 */
222 switch (rank) {
223 case 0:
224 odt_mask_0 = 0x4;
225 odt_mask_1 = 0x5;
226 break;
227 case 1:
228 odt_mask_0 = 0x8;
229 odt_mask_1 = 0xA;
230 break;
231 case 2:
232 odt_mask_0 = 0x1;
233 odt_mask_1 = 0x5;
234 break;
235 case 3:
236 odt_mask_0 = 0x2;
237 odt_mask_1 = 0xA;
238 break;
239 }
240 break;
241 }
242 }
243
244 cs_and_odt_mask = (0xFF & ~(1 << rank)) |
245 ((0xFF & odt_mask_0) << 8) |
246 ((0xFF & odt_mask_1) << 16);
247 writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
248 RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
249 }
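/*
 * Illustrative example (values hypothetical): the value written above packs
 * the active-low CS mask into bits [7:0], odt_mask_0 (read ODT) into bits
 * [15:8] and odt_mask_1 (write ODT) into bits [23:16]. For rank 1 of a
 * 4-rank configuration this yields 0xfd | (0x8 << 8) | (0xa << 16) =
 * 0x000a08fd.
 */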
250
251 /**
252 * scc_mgr_set() - Set SCC Manager register
253 * @off: Base offset in SCC Manager space
254 * @grp: Read/Write group
255 * @val: Value to be set
256 *
257 * This function sets the SCC Manager (Scan Chain Control Manager) register.
258 */
259 static void scc_mgr_set(u32 off, u32 grp, u32 val)
260 {
261 writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
262 }
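/*
 * Illustrative example: each group occupies one 32-bit word in its register
 * bank, so setting e.g. the DQS enable phase of read group 3 writes to
 * SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_EN_PHASE_OFFSET | 0xc.
 */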
263
264 /**
265 * scc_mgr_initialize() - Initialize SCC Manager registers
266 *
267 * Initialize SCC Manager registers.
268 */
269 static void scc_mgr_initialize(void)
270 {
271 /*
272 * Clear register file for HPS. 16 (2^4) is the size of the
273 * full register file in the scc mgr:
274 * RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
275 * MEM_IF_READ_DQS_WIDTH - 1);
276 */
277 int i;
278
279 for (i = 0; i < 16; i++) {
280 debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
281 __func__, __LINE__, i);
282 scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
283 }
284 }
285
286 static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase)
287 {
288 scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
289 }
290
291 static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay)
292 {
293 scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
294 }
295
296 static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase)
297 {
298 scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
299 }
300
301 static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay)
302 {
303 scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
304 }
305
306 static void scc_mgr_set_dqs_io_in_delay(u32 delay)
307 {
308 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs,
309 delay);
310 }
311
312 static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay)
313 {
314 scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
315 }
316
317 static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay)
318 {
319 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
320 }
321
322 static void scc_mgr_set_dqs_out1_delay(u32 delay)
323 {
324 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs,
325 delay);
326 }
327
328 static void scc_mgr_set_dm_out1_delay(u32 dm, u32 delay)
329 {
330 scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
331 rwcfg->mem_dq_per_write_dqs + 1 + dm,
332 delay);
333 }
334
335 /* load up dqs config settings */
336 static void scc_mgr_load_dqs(u32 dqs)
337 {
338 writel(dqs, &sdr_scc_mgr->dqs_ena);
339 }
340
341 /* load up dqs io config settings */
342 static void scc_mgr_load_dqs_io(void)
343 {
344 writel(0, &sdr_scc_mgr->dqs_io_ena);
345 }
346
347 /* load up dq config settings */
348 static void scc_mgr_load_dq(u32 dq_in_group)
349 {
350 writel(dq_in_group, &sdr_scc_mgr->dq_ena);
351 }
352
353 /* load up dm config settings */
354 static void scc_mgr_load_dm(u32 dm)
355 {
356 writel(dm, &sdr_scc_mgr->dm_ena);
357 }
358
359 /**
360 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
361 * @off: Base offset in SCC Manager space
362 * @grp: Read/Write group
363 * @val: Value to be set
364 * @update: If non-zero, trigger SCC Manager update for all ranks
365 *
366 * This function sets the SCC Manager (Scan Chain Control Manager) register
367 * and optionally triggers the SCC update for all ranks.
368 */
369 static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
370 const int update)
371 {
372 u32 r;
373
374 for (r = 0; r < rwcfg->mem_number_of_ranks;
375 r += NUM_RANKS_PER_SHADOW_REG) {
376 scc_mgr_set(off, grp, val);
377
378 if (update || (r == 0)) {
379 writel(grp, &sdr_scc_mgr->dqs_ena);
380 writel(0, &sdr_scc_mgr->update);
381 }
382 }
383 }
384
385 static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
386 {
387 /*
388 * USER although the h/w doesn't support different phases per
389 * shadow register, for simplicity our scc manager modeling
390 * keeps different phase settings per shadow reg, and it's
391 * important for us to keep them in sync to match h/w.
392 * for efficiency, the scan chain update should occur only
393 * once to sr0.
394 */
395 scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
396 read_group, phase, 0);
397 }
398
399 static void scc_mgr_set_dqdqs_output_phase_all_ranks(u32 write_group,
400 u32 phase)
401 {
402 /*
403 * USER although the h/w doesn't support different phases per
404 * shadow register, for simplicity our scc manager modeling
405 * keeps different phase settings per shadow reg, and it's
406 * important for us to keep them in sync to match h/w.
407 * for efficiency, the scan chain update should occur only
408 * once to sr0.
409 */
410 scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
411 write_group, phase, 0);
412 }
413
414 static void scc_mgr_set_dqs_en_delay_all_ranks(u32 read_group,
415 u32 delay)
416 {
417 /*
418 * In shadow register mode, the T11 settings are stored in
419 * registers in the core, which are updated by the DQS_ENA
420 * signals. Not issuing the SCC_MGR_UPD command allows us to
421 * save lots of rank switching overhead, by calling
422 * select_shadow_regs_for_update with update_scan_chains
423 * set to 0.
424 */
425 scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
426 read_group, delay, 1);
427 }
428
429 /**
430 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
431 * @write_group: Write group
432 * @delay: Delay value
433 *
434 * This function sets the OCT output delay in SCC manager.
435 */
436 static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
437 {
438 const int ratio = rwcfg->mem_if_read_dqs_width /
439 rwcfg->mem_if_write_dqs_width;
440 const int base = write_group * ratio;
441 int i;
442 /*
443 * Load the setting in the SCC manager
444 * Although OCT affects only write data, the OCT delay is controlled
445 * by the DQS logic block which is instantiated once per read group.
446 * For protocols where a write group consists of multiple read groups,
447 * the setting must be set multiple times.
448 */
449 for (i = 0; i < ratio; i++)
450 scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
451 }
452
453 /**
454 * scc_mgr_set_hhp_extras() - Set HHP extras.
455 *
456 * Load the fixed setting in the SCC manager HHP extras.
457 */
458 static void scc_mgr_set_hhp_extras(void)
459 {
460 /*
461 * Load the fixed setting in the SCC manager
462 * bits: 0:0 = 1'b1 - DQS bypass
463 * bits: 1:1 = 1'b1 - DQ bypass
464 * bits: 4:2 = 3'b001 - rfifo_mode
465 * bits: 6:5 = 2'b01 - rfifo clock_select
466 * bits: 7:7 = 1'b0 - separate gating from ungating setting
467 * bits: 8:8 = 1'b0 - separate OE from Output delay setting
468 */
469 const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
470 (1 << 2) | (1 << 1) | (1 << 0);
471 const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
472 SCC_MGR_HHP_GLOBALS_OFFSET |
473 SCC_MGR_HHP_EXTRAS_OFFSET;
474
475 debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
476 __func__, __LINE__);
477 writel(value, addr);
478 debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
479 __func__, __LINE__);
480 }
481
482 /**
483 * scc_mgr_zero_all() - Zero all DQS config
484 *
485 * Zero all DQS config.
486 */
487 static void scc_mgr_zero_all(void)
488 {
489 int i, r;
490
491 /*
492 * USER Zero all DQS config settings, across all groups and all
493 * shadow registers
494 */
495 for (r = 0; r < rwcfg->mem_number_of_ranks;
496 r += NUM_RANKS_PER_SHADOW_REG) {
497 for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
498 /*
499 * The phases actually don't exist on a per-rank basis,
500 * but there's no harm updating them several times, so
501 * let's keep the code simple.
502 */
503 scc_mgr_set_dqs_bus_in_delay(i, iocfg->dqs_in_reserve);
504 scc_mgr_set_dqs_en_phase(i, 0);
505 scc_mgr_set_dqs_en_delay(i, 0);
506 }
507
508 for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) {
509 scc_mgr_set_dqdqs_output_phase(i, 0);
510 /* Arria V/Cyclone V don't have out2. */
511 scc_mgr_set_oct_out1_delay(i, iocfg->dqs_out_reserve);
512 }
513 }
514
515 /* Multicast to all DQS group enables. */
516 writel(0xff, &sdr_scc_mgr->dqs_ena);
517 writel(0, &sdr_scc_mgr->update);
518 }
519
520 /**
521 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
522 * @write_group: Write group
523 *
524 * Set bypass mode and trigger SCC update.
525 */
526 static void scc_set_bypass_mode(const u32 write_group)
527 {
528 /* Multicast to all DQ enables. */
529 writel(0xff, &sdr_scc_mgr->dq_ena);
530 writel(0xff, &sdr_scc_mgr->dm_ena);
531
532 /* Update current DQS IO enable. */
533 writel(0, &sdr_scc_mgr->dqs_io_ena);
534
535 /* Update the DQS logic. */
536 writel(write_group, &sdr_scc_mgr->dqs_ena);
537
538 /* Hit update. */
539 writel(0, &sdr_scc_mgr->update);
540 }
541
542 /**
543 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
544 * @write_group: Write group
545 *
546 * Load DQS settings for Write Group, do not trigger SCC update.
547 */
548 static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
549 {
550 const int ratio = rwcfg->mem_if_read_dqs_width /
551 rwcfg->mem_if_write_dqs_width;
552 const int base = write_group * ratio;
553 int i;
554 /*
555 * Load the setting in the SCC manager
556 * Although OCT affects only write data, the OCT delay is controlled
557 * by the DQS logic block which is instantiated once per read group.
558 * For protocols where a write group consists of multiple read groups,
559 * the setting must be set multiple times.
560 */
561 for (i = 0; i < ratio; i++)
562 writel(base + i, &sdr_scc_mgr->dqs_ena);
563 }
564
565 /**
566 * scc_mgr_zero_group() - Zero all configs for a group
567 *
568 * Zero DQ, DM, DQS and OCT configs for a group.
569 */
570 static void scc_mgr_zero_group(const u32 write_group, const int out_only)
571 {
572 int i, r;
573
574 for (r = 0; r < rwcfg->mem_number_of_ranks;
575 r += NUM_RANKS_PER_SHADOW_REG) {
576 /* Zero all DQ config settings. */
577 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
578 scc_mgr_set_dq_out1_delay(i, 0);
579 if (!out_only)
580 scc_mgr_set_dq_in_delay(i, 0);
581 }
582
583 /* Multicast to all DQ enables. */
584 writel(0xff, &sdr_scc_mgr->dq_ena);
585
586 /* Zero all DM config settings. */
587 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
588 scc_mgr_set_dm_out1_delay(i, 0);
589
590 /* Multicast to all DM enables. */
591 writel(0xff, &sdr_scc_mgr->dm_ena);
592
593 /* Zero all DQS IO settings. */
594 if (!out_only)
595 scc_mgr_set_dqs_io_in_delay(0);
596
597 /* Arria V/Cyclone V don't have out2. */
598 scc_mgr_set_dqs_out1_delay(iocfg->dqs_out_reserve);
599 scc_mgr_set_oct_out1_delay(write_group, iocfg->dqs_out_reserve);
600 scc_mgr_load_dqs_for_write_group(write_group);
601
602 /* Multicast to all DQS IO enables (only 1 in total). */
603 writel(0, &sdr_scc_mgr->dqs_io_ena);
604
605 /* Hit update to zero everything. */
606 writel(0, &sdr_scc_mgr->update);
607 }
608 }
609
610 /*
611 * apply and load a particular input delay for the DQ pins in a group
612 * group_bgn is the index of the first dq pin (in the write group)
613 */
614 static void scc_mgr_apply_group_dq_in_delay(u32 group_bgn, u32 delay)
615 {
616 u32 i, p;
617
618 for (i = 0, p = group_bgn; i < rwcfg->mem_dq_per_read_dqs; i++, p++) {
619 scc_mgr_set_dq_in_delay(p, delay);
620 scc_mgr_load_dq(p);
621 }
622 }
623
624 /**
625 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
626 * @delay: Delay value
627 *
628 * Apply and load a particular output delay for the DQ pins in a group.
629 */
630 static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
631 {
632 int i;
633
634 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
635 scc_mgr_set_dq_out1_delay(i, delay);
636 scc_mgr_load_dq(i);
637 }
638 }
639
640 /* apply and load a particular output delay for the DM pins in a group */
641 static void scc_mgr_apply_group_dm_out1_delay(u32 delay1)
642 {
643 u32 i;
644
645 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
646 scc_mgr_set_dm_out1_delay(i, delay1);
647 scc_mgr_load_dm(i);
648 }
649 }
650
651
652 /* apply and load delay on both DQS and OCT out1 */
653 static void scc_mgr_apply_group_dqs_io_and_oct_out1(u32 write_group,
654 u32 delay)
655 {
656 scc_mgr_set_dqs_out1_delay(delay);
657 scc_mgr_load_dqs_io();
658
659 scc_mgr_set_oct_out1_delay(write_group, delay);
660 scc_mgr_load_dqs_for_write_group(write_group);
661 }
662
663 /**
664 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
665 * @write_group: Write group
666 * @delay: Delay value
667 *
668 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
669 */
670 static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
671 const u32 delay)
672 {
673 u32 i, new_delay;
674
675 /* DQ shift */
676 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++)
677 scc_mgr_load_dq(i);
678
679 /* DM shift */
680 for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
681 scc_mgr_load_dm(i);
682
683 /* DQS shift */
684 new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
685 if (new_delay > iocfg->io_out2_delay_max) {
686 debug_cond(DLEVEL == 1,
687 "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
688 __func__, __LINE__, write_group, delay, new_delay,
689 iocfg->io_out2_delay_max,
690 new_delay - iocfg->io_out2_delay_max);
691 new_delay -= iocfg->io_out2_delay_max;
692 scc_mgr_set_dqs_out1_delay(new_delay);
693 }
694
695 scc_mgr_load_dqs_io();
696
697 /* OCT shift */
698 new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
699 if (new_delay > iocfg->io_out2_delay_max) {
700 debug_cond(DLEVEL == 1,
701 "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
702 __func__, __LINE__, write_group, delay,
703 new_delay, iocfg->io_out2_delay_max,
704 new_delay - iocfg->io_out2_delay_max);
705 new_delay -= iocfg->io_out2_delay_max;
706 scc_mgr_set_oct_out1_delay(write_group, new_delay);
707 }
708
709 scc_mgr_load_dqs_for_write_group(write_group);
710 }
711
712 /**
713 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side to all ranks
714 * @write_group: Write group
715 * @delay: Delay value
716 *
717 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
718 */
719 static void
720 scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
721 const u32 delay)
722 {
723 int r;
724
725 for (r = 0; r < rwcfg->mem_number_of_ranks;
726 r += NUM_RANKS_PER_SHADOW_REG) {
727 scc_mgr_apply_group_all_out_delay_add(write_group, delay);
728 writel(0, &sdr_scc_mgr->update);
729 }
730 }
731
732 /**
733 * set_jump_as_return() - Return instruction optimization
734 *
735  * Optimization used to recover some slots in the ddr3 inst_rom; it could
736  * be applied to other protocols if we wanted to.
737 */
738 static void set_jump_as_return(void)
739 {
740 /*
741  * To save space, we replace the return with a jump to a special shared
742  * RETURN instruction and set the counter to a large value so that
743  * we always jump.
744 */
745 writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
746 writel(rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
747 }
748
749 /**
750 * delay_for_n_mem_clocks() - Delay for N memory clocks
751 * @clocks: Length of the delay
752 *
753 * Delay for N memory clocks.
754 */
755 static void delay_for_n_mem_clocks(const u32 clocks)
756 {
757 u32 afi_clocks;
758 u16 c_loop;
759 u8 inner;
760 u8 outer;
761
762 debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);
763
764 /* Scale (rounding up) to get afi clocks. */
765 afi_clocks = DIV_ROUND_UP(clocks, misccfg->afi_rate_ratio);
766 if (afi_clocks) /* Temporary underflow protection */
767 afi_clocks--;
768
769 /*
770 * Note, we don't bother accounting for being off a little
771 * bit because of a few extra instructions in outer loops.
772 * Note, the loops have a test at the end, and do the test
773 * before the decrement, and so always perform the loop
774  * one more time than the counter value.
775 */
776 c_loop = afi_clocks >> 16;
777 outer = c_loop ? 0xff : (afi_clocks >> 8);
778 inner = outer ? 0xff : afi_clocks;
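/*
 * Illustrative examples (values chosen for illustration): with
 * afi_clocks = 255, c_loop = 0 and outer = 0, so only the inner counter
 * (255) is used and the single-loop path below is taken. With
 * afi_clocks = 0x1a5b7, c_loop = 1 and both inner and outer saturate at
 * 0xff, so the nested idle_loop2 path below runs c_loop + 1 times.
 */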
779
780 /*
781 * rom instructions are structured as follows:
782 *
783 * IDLE_LOOP2: jnz cntr0, TARGET_A
784 * IDLE_LOOP1: jnz cntr1, TARGET_B
785 * return
786 *
787 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
788 * TARGET_B is set to IDLE_LOOP2 as well
789 *
790 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
791 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
792 *
793 * a little confusing, but it helps save precious space in the inst_rom
794 * and sequencer rom and keeps the delays more accurate and reduces
795 * overhead
796 */
797 if (afi_clocks < 0x100) {
798 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
799 &sdr_rw_load_mgr_regs->load_cntr1);
800
801 writel(rwcfg->idle_loop1,
802 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
803
804 writel(rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
805 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
806 } else {
807 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
808 &sdr_rw_load_mgr_regs->load_cntr0);
809
810 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
811 &sdr_rw_load_mgr_regs->load_cntr1);
812
813 writel(rwcfg->idle_loop2,
814 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
815
816 writel(rwcfg->idle_loop2,
817 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
818
819 do {
820 writel(rwcfg->idle_loop2,
821 SDR_PHYGRP_RWMGRGRP_ADDRESS |
822 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
823 } while (c_loop-- != 0);
824 }
825 debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
826 }
827
828 /**
829 * rw_mgr_mem_init_load_regs() - Load instruction registers
830 * @cntr0: Counter 0 value
831 * @cntr1: Counter 1 value
832 * @cntr2: Counter 2 value
833 * @jump: Jump instruction value
834 *
835 * Load instruction registers.
836 */
837 static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
838 {
839 u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
840 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
841
842 /* Load counters */
843 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
844 &sdr_rw_load_mgr_regs->load_cntr0);
845 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
846 &sdr_rw_load_mgr_regs->load_cntr1);
847 writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
848 &sdr_rw_load_mgr_regs->load_cntr2);
849
850 /* Load jump address */
851 writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
852 writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
853 writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
854
855 /* Execute count instruction */
856 writel(jump, grpaddr);
857 }
858
859 /**
860 * rw_mgr_mem_load_user() - Load user calibration values
861 * @fin1: Final instruction 1
862 * @fin2: Final instruction 2
863 * @precharge: If 1, precharge the banks at the end
864 *
865 * Load user calibration values and optionally precharge the banks.
866 */
867 static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
868 const int precharge)
869 {
870 u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
871 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
872 u32 r;
873
874 for (r = 0; r < rwcfg->mem_number_of_ranks; r++) {
875 /* set rank */
876 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
877
878 /* precharge all banks ... */
879 if (precharge)
880 writel(rwcfg->precharge_all, grpaddr);
881
882 /*
883  * USER Use mirrored commands for odd ranks if address
884  * mirroring is on.
885 */
886 if ((rwcfg->mem_address_mirroring >> r) & 0x1) {
887 set_jump_as_return();
888 writel(rwcfg->mrs2_mirr, grpaddr);
889 delay_for_n_mem_clocks(4);
890 set_jump_as_return();
891 writel(rwcfg->mrs3_mirr, grpaddr);
892 delay_for_n_mem_clocks(4);
893 set_jump_as_return();
894 writel(rwcfg->mrs1_mirr, grpaddr);
895 delay_for_n_mem_clocks(4);
896 set_jump_as_return();
897 writel(fin1, grpaddr);
898 } else {
899 set_jump_as_return();
900 writel(rwcfg->mrs2, grpaddr);
901 delay_for_n_mem_clocks(4);
902 set_jump_as_return();
903 writel(rwcfg->mrs3, grpaddr);
904 delay_for_n_mem_clocks(4);
905 set_jump_as_return();
906 writel(rwcfg->mrs1, grpaddr);
907 set_jump_as_return();
908 writel(fin2, grpaddr);
909 }
910
911 if (precharge)
912 continue;
913
914 set_jump_as_return();
915 writel(rwcfg->zqcl, grpaddr);
916
917 /* tZQinit = tDLLK = 512 ck cycles */
918 delay_for_n_mem_clocks(512);
919 }
920 }
921
922 /**
923 * rw_mgr_mem_initialize() - Initialize RW Manager
924 *
925 * Initialize RW Manager.
926 */
927 static void rw_mgr_mem_initialize(void)
928 {
929 debug("%s:%d\n", __func__, __LINE__);
930
931 /* The reset / cke part of initialization is broadcasted to all ranks */
932 writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
933 RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
934
935 /*
936  * Here's how you load registers for a loop:
937  * Counters are located @ 0x800
938  * Jump addresses are located @ 0xC00
939  * For both, registers 0 to 3 are selected using bits 3 and 2, like
940  * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
941  * I know this ain't pretty, but the Avalon bus throws away the 2 least
942  * significant bits.
943 */
944
945 /* Start with memory RESET activated */
946
947 /* tINIT = 200us */
948
949 /*
950 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
951  * If a and b are the numbers of iterations in the 2 nested loops,
952  * it takes the following number of cycles to complete the operation:
953  * number_of_cycles = ((2 + n) * a + 2) * b
954  * where n is the number of instructions in the inner loop.
955 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
956 * b = 6A
957 */
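/*
 * Worked check of the formula above: with n = 0, a = 256 and b = 106,
 * number_of_cycles = ((2 + 0) * 256 + 2) * 106 = 54484, which covers the
 * ~54000 cycles needed for 200us at 266MHz.
 */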
958 rw_mgr_mem_init_load_regs(misccfg->tinit_cntr0_val,
959 misccfg->tinit_cntr1_val,
960 misccfg->tinit_cntr2_val,
961 rwcfg->init_reset_0_cke_0);
962
963 /* Indicate that memory is stable. */
964 writel(1, &phy_mgr_cfg->reset_mem_stbl);
965
966 /*
967 * transition the RESET to high
968 * Wait for 500us
969 */
970
971 /*
972 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
973  * If a and b are the numbers of iterations in the 2 nested loops,
974  * it takes the following number of cycles to complete the operation:
975  * number_of_cycles = ((2 + n) * a + 2) * b
976  * where n is the number of instructions in the inner loop.
977 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
978 * b = FF
979 */
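/*
 * Worked check of the formula above: with n = 2, a = 131 and b = 256,
 * number_of_cycles = ((2 + 2) * 131 + 2) * 256 = 134656, which covers the
 * ~134000 cycles needed for 500us at 266MHz.
 */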
980 rw_mgr_mem_init_load_regs(misccfg->treset_cntr0_val,
981 misccfg->treset_cntr1_val,
982 misccfg->treset_cntr2_val,
983 rwcfg->init_reset_1_cke_0);
984
985 /* Bring up clock enable. */
986
987 /* tXRP < 250 ck cycles */
988 delay_for_n_mem_clocks(250);
989
990 rw_mgr_mem_load_user(rwcfg->mrs0_dll_reset_mirr, rwcfg->mrs0_dll_reset,
991 0);
992 }
993
994 /**
995 * rw_mgr_mem_handoff() - Hand off the memory to user
996 *
997 * At the end of calibration we have to program the user settings in
998 * and hand off the memory to the user.
999 */
1000 static void rw_mgr_mem_handoff(void)
1001 {
1002 rw_mgr_mem_load_user(rwcfg->mrs0_user_mirr, rwcfg->mrs0_user, 1);
1003 /*
1004 * Need to wait tMOD (12CK or 15ns) time before issuing other
1005 * commands, but we will have plenty of NIOS cycles before actual
1006  * handoff, so it's okay.
1007 */
1008 }
1009
1010 /**
1011 * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
1012 * @group: Write Group
1013  * @test_dm: Test DM (data mask) functionality
1014 *
1015 * Issue write test command. Two variants are provided, one that just tests
1016 * a write pattern and another that tests datamask functionality.
1017 */
1018 static void rw_mgr_mem_calibrate_write_test_issue(u32 group,
1019 u32 test_dm)
1020 {
1021 const u32 quick_write_mode =
1022 (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
1023 misccfg->enable_super_quick_calibration;
1024 u32 mcc_instruction;
1025 u32 rw_wl_nop_cycles;
1026
1027 /*
1028 * Set counter and jump addresses for the right
1029 * number of NOP cycles.
1030 * The number of supported NOP cycles can range from -1 to infinity
1031 * Three different cases are handled:
1032 *
1033 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
1034 * mechanism will be used to insert the right number of NOPs
1035 *
1036 * 2. For a number of NOP cycles equals to 0, the micro-instruction
1037 * issuing the write command will jump straight to the
1038 * micro-instruction that turns on DQS (for DDRx), or outputs write
1039 * data (for RLD), skipping
1040  * the NOP micro-instruction altogether.
1041 *
1042 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
1043 * turned on in the same micro-instruction that issues the write
1044 * command. Then we need
1045 * to directly jump to the micro-instruction that sends out the data
1046 *
1047 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
1048 * (2 and 3). One jump-counter (0) is used to perform multiple
1049 * write-read operations.
1050  * One counter is left to issue this command in "multiple-group" mode.
1051 */
1052
1053 rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
1054
1055 if (rw_wl_nop_cycles == -1) {
1056 /*
1057 * CNTR 2 - We want to execute the special write operation that
1058 * turns on DQS right away and then skip directly to the
1059 * instruction that sends out the data. We set the counter to a
1060 * large number so that the jump is always taken.
1061 */
1062 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1063
1064 /* CNTR 3 - Not used */
1065 if (test_dm) {
1066 mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
1067 writel(rwcfg->lfsr_wr_rd_dm_bank_0_data,
1068 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1069 writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
1070 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1071 } else {
1072 mcc_instruction = rwcfg->lfsr_wr_rd_bank_0_wl_1;
1073 writel(rwcfg->lfsr_wr_rd_bank_0_data,
1074 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1075 writel(rwcfg->lfsr_wr_rd_bank_0_nop,
1076 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1077 }
1078 } else if (rw_wl_nop_cycles == 0) {
1079 /*
1080 * CNTR 2 - We want to skip the NOP operation and go straight
1081 * to the DQS enable instruction. We set the counter to a large
1082 * number so that the jump is always taken.
1083 */
1084 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1085
1086 /* CNTR 3 - Not used */
1087 if (test_dm) {
1088 mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0;
1089 writel(rwcfg->lfsr_wr_rd_dm_bank_0_dqs,
1090 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1091 } else {
1092 mcc_instruction = rwcfg->lfsr_wr_rd_bank_0;
1093 writel(rwcfg->lfsr_wr_rd_bank_0_dqs,
1094 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1095 }
1096 } else {
1097 /*
1098 * CNTR 2 - In this case we want to execute the next instruction
1099 * and NOT take the jump. So we set the counter to 0. The jump
1100 * address doesn't count.
1101 */
1102 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
1103 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1104
1105 /*
1106 * CNTR 3 - Set the nop counter to the number of cycles we
1107 * need to loop for, minus 1.
1108 */
1109 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
1110 if (test_dm) {
1111 mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0;
1112 writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
1113 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1114 } else {
1115 mcc_instruction = rwcfg->lfsr_wr_rd_bank_0;
1116 writel(rwcfg->lfsr_wr_rd_bank_0_nop,
1117 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1118 }
1119 }
1120
1121 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1122 RW_MGR_RESET_READ_DATAPATH_OFFSET);
1123
1124 if (quick_write_mode)
1125 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
1126 else
1127 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
1128
1129 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1130
1131 /*
1132 * CNTR 1 - This is used to ensure enough time elapses
1133 * for read data to come back.
1134 */
1135 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
1136
1137 if (test_dm) {
1138 writel(rwcfg->lfsr_wr_rd_dm_bank_0_wait,
1139 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
1140 } else {
1141 writel(rwcfg->lfsr_wr_rd_bank_0_wait,
1142 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
1143 }
1144
1145 writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
1146 RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
1147 (group << 2));
1148 }
1149
1150 /**
1151 * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass
1152 * @rank_bgn: Rank number
1153 * @write_group: Write Group
1154 * @use_dm: Use DM
1155 * @all_correct: All bits must be correct in the mask
1156 * @bit_chk: Resulting bit mask after the test
1157 * @all_ranks: Test all ranks
1158 *
1159 * Test writes, can check for a single bit pass or multiple bit pass.
1160 */
1161 static int
1162 rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
1163 const u32 use_dm, const u32 all_correct,
1164 u32 *bit_chk, const u32 all_ranks)
1165 {
1166 const u32 rank_end = all_ranks ?
1167 rwcfg->mem_number_of_ranks :
1168 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1169 const u32 shift_ratio = rwcfg->mem_dq_per_write_dqs /
1170 rwcfg->mem_virtual_groups_per_write_dqs;
1171 const u32 correct_mask_vg = param->write_correct_mask_vg;
1172
1173 u32 tmp_bit_chk, base_rw_mgr;
1174 int vg, r;
1175
1176 *bit_chk = param->write_correct_mask;
1177
1178 for (r = rank_bgn; r < rank_end; r++) {
1179 /* Set rank */
1180 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1181
1182 tmp_bit_chk = 0;
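/*
 * Illustrative example (widths hypothetical): with 8 DQ per write DQS
 * split across 2 virtual groups, shift_ratio = 4 and correct_mask_vg =
 * 0xf; each pass through the loop below shifts tmp_bit_chk left by one
 * nibble and ORs in the pass bits of the current virtual group (bits set
 * in base_rw_mgr indicate errors, hence the inversion).
 */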
1183 for (vg = rwcfg->mem_virtual_groups_per_write_dqs - 1;
1184 vg >= 0; vg--) {
1185 /* Reset the FIFOs to get pointers to known state. */
1186 writel(0, &phy_mgr_cmd->fifo_reset);
1187
1188 rw_mgr_mem_calibrate_write_test_issue(
1189 write_group *
1190 rwcfg->mem_virtual_groups_per_write_dqs + vg,
1191 use_dm);
1192
1193 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1194 tmp_bit_chk <<= shift_ratio;
1195 tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
1196 }
1197
1198 *bit_chk &= tmp_bit_chk;
1199 }
1200
1201 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1202 if (all_correct) {
1203 debug_cond(DLEVEL == 2,
1204 "write_test(%u,%u,ALL) : %u == %u => %i\n",
1205 write_group, use_dm, *bit_chk,
1206 param->write_correct_mask,
1207 *bit_chk == param->write_correct_mask);
1208 return *bit_chk == param->write_correct_mask;
1209 } else {
1210 debug_cond(DLEVEL == 2,
1211 "write_test(%u,%u,ONE) : %u != %i => %i\n",
1212 write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
1213 return *bit_chk != 0x00;
1214 }
1215 }
1216
1217 /**
1218 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
1219 * @rank_bgn: Rank number
1220 * @group: Read/Write Group
1221 * @all_ranks: Test all ranks
1222 *
1223 * Performs a guaranteed read on the patterns we are going to use during a
1224 * read test to ensure memory works.
1225 */
1226 static int
1227 rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
1228 const u32 all_ranks)
1229 {
1230 const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1231 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1232 const u32 addr_offset =
1233 (group * rwcfg->mem_virtual_groups_per_read_dqs) << 2;
1234 const u32 rank_end = all_ranks ?
1235 rwcfg->mem_number_of_ranks :
1236 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1237 const u32 shift_ratio = rwcfg->mem_dq_per_read_dqs /
1238 rwcfg->mem_virtual_groups_per_read_dqs;
1239 const u32 correct_mask_vg = param->read_correct_mask_vg;
1240
1241 u32 tmp_bit_chk, base_rw_mgr, bit_chk;
1242 int vg, r;
1243 int ret = 0;
1244
1245 bit_chk = param->read_correct_mask;
1246
1247 for (r = rank_bgn; r < rank_end; r++) {
1248 /* Set rank */
1249 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1250
1251  /* Load up a constant burst of read commands */
1252 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
1253 writel(rwcfg->guaranteed_read,
1254 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1255
1256 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
1257 writel(rwcfg->guaranteed_read_cont,
1258 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
1259
1260 tmp_bit_chk = 0;
1261 for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1;
1262 vg >= 0; vg--) {
1263 /* Reset the FIFOs to get pointers to known state. */
1264 writel(0, &phy_mgr_cmd->fifo_reset);
1265 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1266 RW_MGR_RESET_READ_DATAPATH_OFFSET);
1267 writel(rwcfg->guaranteed_read,
1268 addr + addr_offset + (vg << 2));
1269
1270 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1271 tmp_bit_chk <<= shift_ratio;
1272 tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
1273 }
1274
1275 bit_chk &= tmp_bit_chk;
1276 }
1277
1278 writel(rwcfg->clear_dqs_enable, addr + (group << 2));
1279
1280 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1281
1282 if (bit_chk != param->read_correct_mask)
1283 ret = -EIO;
1284
1285 debug_cond(DLEVEL == 1,
1286 "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
1287 __func__, __LINE__, group, bit_chk,
1288 param->read_correct_mask, ret);
1289
1290 return ret;
1291 }
1292
1293 /**
1294 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
1295 * @rank_bgn: Rank number
1296 * @all_ranks: Test all ranks
1297 *
1298 * Load up the patterns we are going to use during a read test.
1299 */
1300 static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
1301 const int all_ranks)
1302 {
1303 const u32 rank_end = all_ranks ?
1304 rwcfg->mem_number_of_ranks :
1305 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1306 u32 r;
1307
1308 debug("%s:%d\n", __func__, __LINE__);
1309
1310 for (r = rank_bgn; r < rank_end; r++) {
1311 /* set rank */
1312 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1313
1314  /* Load up a constant burst */
1315 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
1316
1317 writel(rwcfg->guaranteed_write_wait0,
1318 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1319
1320 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
1321
1322 writel(rwcfg->guaranteed_write_wait1,
1323 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
1324
1325 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
1326
1327 writel(rwcfg->guaranteed_write_wait2,
1328 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1329
1330 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
1331
1332 writel(rwcfg->guaranteed_write_wait3,
1333 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1334
1335 writel(rwcfg->guaranteed_write, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1336 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
1337 }
1338
1339 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1340 }
1341
1342 /**
1343 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
1344 * @rank_bgn: Rank number
1345 * @group: Read/Write group
1346 * @num_tries: Number of retries of the test
1347 * @all_correct: All bits must be correct in the mask
1348 * @bit_chk: Resulting bit mask after the test
1349 * @all_groups: Test all R/W groups
1350 * @all_ranks: Test all ranks
1351 *
1352  * Try a read and see if it returns correct data back. The test has dummy
1353  * reads inserted into the mix, used to align DQS enable, and has more
1354  * thorough checks than the regular read test.
1355 */
1356 static int
1357 rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
1358 const u32 num_tries, const u32 all_correct,
1359 u32 *bit_chk,
1360 const u32 all_groups, const u32 all_ranks)
1361 {
1362 const u32 rank_end = all_ranks ? rwcfg->mem_number_of_ranks :
1363 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1364 const u32 quick_read_mode =
1365 ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
1366 misccfg->enable_super_quick_calibration);
1367 u32 correct_mask_vg = param->read_correct_mask_vg;
1368 u32 tmp_bit_chk;
1369 u32 base_rw_mgr;
1370 u32 addr;
1371
1372 int r, vg, ret;
1373
1374 *bit_chk = param->read_correct_mask;
1375
1376 for (r = rank_bgn; r < rank_end; r++) {
1377 /* set rank */
1378 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
1379
1380 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
1381
1382 writel(rwcfg->read_b2b_wait1,
1383 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
1384
1385 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
1386 writel(rwcfg->read_b2b_wait2,
1387 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1388
1389 if (quick_read_mode)
1390 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
1391 /* need at least two (1+1) reads to capture failures */
1392 else if (all_groups)
1393 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
1394 else
1395 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
1396
1397 writel(rwcfg->read_b2b,
1398 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1399 if (all_groups)
1400 writel(rwcfg->mem_if_read_dqs_width *
1401 rwcfg->mem_virtual_groups_per_read_dqs - 1,
1402 &sdr_rw_load_mgr_regs->load_cntr3);
1403 else
1404 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
1405
1406 writel(rwcfg->read_b2b,
1407 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1408
1409 tmp_bit_chk = 0;
1410 for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1; vg >= 0;
1411 vg--) {
1412 /* Reset the FIFOs to get pointers to known state. */
1413 writel(0, &phy_mgr_cmd->fifo_reset);
1414 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1415 RW_MGR_RESET_READ_DATAPATH_OFFSET);
1416
1417 if (all_groups) {
1418 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1419 RW_MGR_RUN_ALL_GROUPS_OFFSET;
1420 } else {
1421 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1422 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1423 }
1424
1425 writel(rwcfg->read_b2b, addr +
1426 ((group *
1427 rwcfg->mem_virtual_groups_per_read_dqs +
1428 vg) << 2));
1429
1430 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1431 tmp_bit_chk <<= rwcfg->mem_dq_per_read_dqs /
1432 rwcfg->mem_virtual_groups_per_read_dqs;
1433 tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
1434 }
1435
1436 *bit_chk &= tmp_bit_chk;
1437 }
1438
1439 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1440 writel(rwcfg->clear_dqs_enable, addr + (group << 2));
1441
1442 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
1443
1444 if (all_correct) {
1445 ret = (*bit_chk == param->read_correct_mask);
1446 debug_cond(DLEVEL == 2,
1447 "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
1448 __func__, __LINE__, group, all_groups, *bit_chk,
1449 param->read_correct_mask, ret);
1450 } else {
1451 ret = (*bit_chk != 0x00);
1452 debug_cond(DLEVEL == 2,
1453 "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
1454 __func__, __LINE__, group, all_groups, *bit_chk,
1455 0, ret);
1456 }
1457
1458 return ret;
1459 }
1460
1461 /**
1462 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
1463 * @grp: Read/Write group
1464 * @num_tries: Number of retries of the test
1465 * @all_correct: All bits must be correct in the mask
1466 * @all_groups: Test all R/W groups
1467 *
1468 * Perform a READ test across all memory ranks.
1469 */
1470 static int
1471 rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
1472 const u32 all_correct,
1473 const u32 all_groups)
1474 {
1475 u32 bit_chk;
1476 return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
1477 &bit_chk, all_groups, 1);
1478 }
1479
1480 /**
1481 * rw_mgr_incr_vfifo() - Increase VFIFO value
1482 * @grp: Read/Write group
1483 *
1484 * Increase VFIFO value.
1485 */
1486 static void rw_mgr_incr_vfifo(const u32 grp)
1487 {
1488 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
1489 }
1490
1491 /**
1492 * rw_mgr_decr_vfifo() - Decrease VFIFO value
1493 * @grp: Read/Write group
1494 *
1495 * Decrease VFIFO value.
1496 */
1497 static void rw_mgr_decr_vfifo(const u32 grp)
1498 {
1499 u32 i;
1500
1501 for (i = 0; i < misccfg->read_valid_fifo_size - 1; i++)
1502 rw_mgr_incr_vfifo(grp);
1503 }
1504
1505 /**
1506 * find_vfifo_failing_read() - Push VFIFO to get a failing read
1507 * @grp: Read/Write group
1508 *
1509 * Push VFIFO until a failing read happens.
1510 */
1511 static int find_vfifo_failing_read(const u32 grp)
1512 {
1513 u32 v, ret, fail_cnt = 0;
1514
1515 for (v = 0; v < misccfg->read_valid_fifo_size; v++) {
1516 debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
1517 __func__, __LINE__, v);
1518 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1519 PASS_ONE_BIT, 0);
1520 if (!ret) {
1521 fail_cnt++;
1522
1523 if (fail_cnt == 2)
1524 return v;
1525 }
1526
1527 /* Fiddle with FIFO. */
1528 rw_mgr_incr_vfifo(grp);
1529 }
1530
1531 /* No failing read found! Something must have gone wrong. */
1532 debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
1533 return 0;
1534 }
1535
1536 /**
1537 * sdr_find_phase_delay() - Find DQS enable phase or delay
1538 * @working: If 1, look for working phase/delay, if 0, look for non-working
1539 * @delay: If 1, look for delay, if 0, look for phase
1540 * @grp: Read/Write group
1541 * @work: Working window position
1542 * @work_inc: Working window increment
1543 * @pd: DQS Phase/Delay Iterator
1544 *
1545 * Find working or non-working DQS enable phase setting.
1546 */
1547 static int sdr_find_phase_delay(int working, int delay, const u32 grp,
1548 u32 *work, const u32 work_inc, u32 *pd)
1549 {
1550 const u32 max = delay ? iocfg->dqs_en_delay_max :
1551 iocfg->dqs_en_phase_max;
1552 u32 ret;
1553
1554 for (; *pd <= max; (*pd)++) {
1555 if (delay)
1556 scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
1557 else
1558 scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);
1559
1560 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1561 PASS_ONE_BIT, 0);
1562 if (!working)
1563 ret = !ret;
1564
1565 if (ret)
1566 return 0;
1567
1568 if (work)
1569 *work += work_inc;
1570 }
1571
1572 return -EINVAL;
1573 }
1574 /**
1575 * sdr_find_phase() - Find DQS enable phase
1576 * @working: If 1, look for working phase, if 0, look for non-working phase
1577 * @grp: Read/Write group
1578 * @work: Working window position
1579 * @i: Iterator
1580 * @p: DQS Phase Iterator
1581 *
1582 * Find working or non-working DQS enable phase setting.
1583 */
1584 static int sdr_find_phase(int working, const u32 grp, u32 *work,
1585 u32 *i, u32 *p)
1586 {
1587 const u32 end = misccfg->read_valid_fifo_size + (working ? 0 : 1);
1588 int ret;
1589
1590 for (; *i < end; (*i)++) {
1591 if (working)
1592 *p = 0;
1593
1594 ret = sdr_find_phase_delay(working, 0, grp, work,
1595 iocfg->delay_per_opa_tap, p);
1596 if (!ret)
1597 return 0;
1598
1599 if (*p > iocfg->dqs_en_phase_max) {
1600 /* Fiddle with FIFO. */
1601 rw_mgr_incr_vfifo(grp);
1602 if (!working)
1603 *p = 0;
1604 }
1605 }
1606
1607 return -EINVAL;
1608 }
1609
1610 /**
1611 * sdr_working_phase() - Find working DQS enable phase
1612 * @grp: Read/Write group
1613 * @work_bgn: Working window start position
1614 * @d: dtaps output value
1615 * @p: DQS Phase Iterator
1616 * @i: Iterator
1617 *
1618 * Find working DQS enable phase setting.
1619 */
1620 static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
1621 u32 *p, u32 *i)
1622 {
1623 const u32 dtaps_per_ptap = iocfg->delay_per_opa_tap /
1624 iocfg->delay_per_dqs_en_dchain_tap;
1625 int ret;
1626
1627 *work_bgn = 0;
1628
1629 for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
1630 *i = 0;
1631 scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
1632 ret = sdr_find_phase(1, grp, work_bgn, i, p);
1633 if (!ret)
1634 return 0;
1635 *work_bgn += iocfg->delay_per_dqs_en_dchain_tap;
1636 }
1637
1638 /* Cannot find working solution */
1639 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
1640 __func__, __LINE__);
1641 return -EINVAL;
1642 }
1643
1644 /**
1645 * sdr_backup_phase() - Find DQS enable backup phase
1646 * @grp: Read/Write group
1647 * @work_bgn: Working window start position
1648 * @p: DQS Phase Iterator
1649 *
1650 * Find DQS enable backup phase setting.
1651 */
1652 static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
1653 {
1654 u32 tmp_delay, d;
1655 int ret;
1656
1657 /* Special case code for backing up a phase */
1658 if (*p == 0) {
1659 *p = iocfg->dqs_en_phase_max;
1660 rw_mgr_decr_vfifo(grp);
1661 } else {
1662 (*p)--;
1663 }
1664 tmp_delay = *work_bgn - iocfg->delay_per_opa_tap;
1665 scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
1666
1667 for (d = 0; d <= iocfg->dqs_en_delay_max && tmp_delay < *work_bgn;
1668 d++) {
1669 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1670
1671 ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1672 PASS_ONE_BIT, 0);
1673 if (ret) {
1674 *work_bgn = tmp_delay;
1675 break;
1676 }
1677
1678 tmp_delay += iocfg->delay_per_dqs_en_dchain_tap;
1679 }
1680
1681 /* Restore VFIFO to old state before we decremented it (if needed). */
1682 (*p)++;
1683 if (*p > iocfg->dqs_en_phase_max) {
1684 *p = 0;
1685 rw_mgr_incr_vfifo(grp);
1686 }
1687
1688 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1689 }
1690
1691 /**
1692 * sdr_nonworking_phase() - Find non-working DQS enable phase
1693 * @grp: Read/Write group
1694 * @work_end: Working window end position
1695 * @p: DQS Phase Iterator
1696 * @i: Iterator
1697 *
1698 * Find non-working DQS enable phase setting.
1699 */
1700 static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
1701 {
1702 int ret;
1703
1704 (*p)++;
1705 *work_end += iocfg->delay_per_opa_tap;
1706 if (*p > iocfg->dqs_en_phase_max) {
1707 /* Fiddle with FIFO. */
1708 *p = 0;
1709 rw_mgr_incr_vfifo(grp);
1710 }
1711
1712 ret = sdr_find_phase(0, grp, work_end, i, p);
1713 if (ret) {
1714 /* Cannot see edge of failing read. */
1715 debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
1716 __func__, __LINE__);
1717 }
1718
1719 return ret;
1720 }
1721
1722 /**
1723 * sdr_find_window_center() - Find center of the working DQS window.
1724 * @grp: Read/Write group
1725 * @work_bgn: First working settings
1726 * @work_end: Last working settings
1727 *
1728 * Find center of the working DQS enable window.
1729 */
1730 static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
1731 const u32 work_end)
1732 {
1733 u32 work_mid;
1734 int tmp_delay = 0;
1735 int i, p, d;
1736
1737 work_mid = (work_bgn + work_end) / 2;
1738
1739 debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
1740 work_bgn, work_end, work_mid);
1741 /* Get the middle delay to be less than a VFIFO delay */
1742 tmp_delay = (iocfg->dqs_en_phase_max + 1) * iocfg->delay_per_opa_tap;
1743
1744 debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
1745 work_mid %= tmp_delay;
1746 debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
1747
1748 tmp_delay = rounddown(work_mid, iocfg->delay_per_opa_tap);
1749 if (tmp_delay > iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap)
1750 tmp_delay = iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap;
1751 p = tmp_delay / iocfg->delay_per_opa_tap;
1752
1753 debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
1754
1755 d = DIV_ROUND_UP(work_mid - tmp_delay,
1756 iocfg->delay_per_dqs_en_dchain_tap);
1757 if (d > iocfg->dqs_en_delay_max)
1758 d = iocfg->dqs_en_delay_max;
1759 tmp_delay += d * iocfg->delay_per_dqs_en_dchain_tap;
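/*
 * Illustrative example (arbitrary units, values hypothetical): assuming
 * delay_per_opa_tap = 400 and delay_per_dqs_en_dchain_tap = 50, a work_mid
 * of 950 rounds down to p = 2 ptaps (800), and the remaining 150 becomes
 * d = 3 dtaps, restoring tmp_delay to 950.
 */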
1760
1761 debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
1762
1763 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1764 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1765
1766 /*
1767  * Push vfifo until we can successfully calibrate. We can do this
1768  * because the largest possible margin is 1 VFIFO cycle.
1769 */
1770 for (i = 0; i < misccfg->read_valid_fifo_size; i++) {
1771 debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
1772 if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1773 PASS_ONE_BIT,
1774 0)) {
1775 debug_cond(DLEVEL == 2,
1776 "%s:%d center: found: ptap=%u dtap=%u\n",
1777 __func__, __LINE__, p, d);
1778 return 0;
1779 }
1780
1781 /* Fiddle with FIFO. */
1782 rw_mgr_incr_vfifo(grp);
1783 }
1784
1785 debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
1786 __func__, __LINE__);
1787 return -EINVAL;
1788 }
1789
1790 /**
1791 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
1792 * @grp: Read/Write Group
1793 *
1794 * Find a good DQS enable to use.
1795 */
1796 static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
1797 {
1798 u32 d, p, i;
1799 u32 dtaps_per_ptap;
1800 u32 work_bgn, work_end;
1801 u32 found_passing_read, found_failing_read = 0, initial_failing_dtap;
1802 int ret;
1803
1804 debug("%s:%d %u\n", __func__, __LINE__, grp);
1805
1806 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1807
1808 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1809 scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
1810
1811 /* Step 0: Determine number of delay taps for each phase tap. */
1812 dtaps_per_ptap = iocfg->delay_per_opa_tap /
1813 iocfg->delay_per_dqs_en_dchain_tap;
1814
1815 /* Step 1: First push vfifo until we get a failing read. */
1816 find_vfifo_failing_read(grp);
1817
1818 /* Step 2: Find first working phase, increment in ptaps. */
1819 work_bgn = 0;
1820 ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
1821 if (ret)
1822 return ret;
1823
1824 work_end = work_bgn;
1825
1826 /*
1827 * If d is 0 then the working window covers a phase tap and we can
1828 * follow the old procedure. Otherwise, we've found the beginning
1829 * and we need to increment the dtaps until we find the end.
1830 */
1831 if (d == 0) {
1832 /*
1833 * Step 3a: If we have room, back off by one and
1834 * increment in dtaps.
1835 */
1836 sdr_backup_phase(grp, &work_bgn, &p);
1837
1838 /*
1839 * Step 4a: go forward from working phase to non working
1840 * phase, increment in ptaps.
1841 */
1842 ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
1843 if (ret)
1844 return ret;
1845
1846 /* Step 5a: Back off one from last, increment in dtaps. */
1847
1848 /* Special case code for backing up a phase */
1849 if (p == 0) {
1850 p = iocfg->dqs_en_phase_max;
1851 rw_mgr_decr_vfifo(grp);
1852 } else {
1853 p = p - 1;
1854 }
1855
1856 work_end -= iocfg->delay_per_opa_tap;
1857 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1858
1859 d = 0;
1860
1861 debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
1862 __func__, __LINE__, p);
1863 }
1864
1865 /* The dtap increment to find the failing edge is done here. */
1866 sdr_find_phase_delay(0, 1, grp, &work_end,
1867 iocfg->delay_per_dqs_en_dchain_tap, &d);
1868
1869 /* Go back to working dtap */
1870 if (d != 0)
1871 work_end -= iocfg->delay_per_dqs_en_dchain_tap;
1872
1873 debug_cond(DLEVEL == 2,
1874 "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
1875 __func__, __LINE__, p, d - 1, work_end);
1876
1877 if (work_end < work_bgn) {
1878 /* nil range */
1879 debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
1880 __func__, __LINE__);
1881 return -EINVAL;
1882 }
1883
1884 debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
1885 __func__, __LINE__, work_bgn, work_end);
1886
1887 /*
1888 * We need to calculate the number of dtaps that equal a ptap.
1889 * To do that we'll back up a ptap and re-find the edge of the
1890 * window using dtaps
1891 */
1892 debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
1893 __func__, __LINE__);
1894
1895 /* Special case code for backing up a phase */
1896 if (p == 0) {
1897 p = iocfg->dqs_en_phase_max;
1898 rw_mgr_decr_vfifo(grp);
1899 debug_cond(DLEVEL == 2, "%s:%d backedup cycle/phase: p=%u\n",
1900 __func__, __LINE__, p);
1901 } else {
1902 p = p - 1;
1903 debug_cond(DLEVEL == 2, "%s:%d backedup phase only: p=%u",
1904 __func__, __LINE__, p);
1905 }
1906
1907 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1908
1909 /*
1910 * Increase dtap until we first see a passing read (in case the
1911 * window is smaller than a ptap), and then a failing read to
1912 * mark the edge of the window again.
1913 */
1914
1915 /* Find a passing read. */
1916 debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
1917 __func__, __LINE__);
1918
1919 initial_failing_dtap = d;
1920
1921 found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
1922 if (found_passing_read) {
1923 /* Find a failing read. */
1924 debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
1925 __func__, __LINE__);
1926 d++;
1927 found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
1928 &d);
1929 } else {
1930 debug_cond(DLEVEL == 1,
1931 "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
1932 __func__, __LINE__);
1933 }
1934
1935 /*
1936 	 * The dynamically calculated dtaps_per_ptap is only valid if we
1937 	 * found both a passing and a failing read. If we didn't, it means
1938 	 * d hit the max (iocfg->dqs_en_delay_max) and dtaps_per_ptap
1939 	 * retains its statically calculated value.
1940 */
1941 if (found_passing_read && found_failing_read)
1942 dtaps_per_ptap = d - initial_failing_dtap;
1943
1944 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
1945 debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
1946 __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
1947
1948 /* Step 6: Find the centre of the window. */
1949 ret = sdr_find_window_center(grp, work_bgn, work_end);
1950
1951 return ret;
1952 }
1953
1954 /**
1955 * search_stop_check() - Check if the detected edge is valid
1956  * @write: Perform write (Stage 2) or read (Stage 1/3) calibration
1957 * @d: DQS delay
1958 * @rank_bgn: Rank number
1959 * @write_group: Write Group
1960 * @read_group: Read Group
1961 * @bit_chk: Resulting bit mask after the test
1962 * @sticky_bit_chk: Resulting sticky bit mask after the test
1963 * @use_read_test: Perform read test
1964 *
1965 * Test if the found edge is valid.
1966 */
1967 static u32 search_stop_check(const int write, const int d, const int rank_bgn,
1968 const u32 write_group, const u32 read_group,
1969 u32 *bit_chk, u32 *sticky_bit_chk,
1970 const u32 use_read_test)
1971 {
1972 const u32 ratio = rwcfg->mem_if_read_dqs_width /
1973 rwcfg->mem_if_write_dqs_width;
1974 const u32 correct_mask = write ? param->write_correct_mask :
1975 param->read_correct_mask;
1976 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
1977 rwcfg->mem_dq_per_read_dqs;
1978 u32 ret;
1979 /*
1980 * Stop searching when the read test doesn't pass AND when
1981 * we've seen a passing read on every bit.
1982 */
1983 if (write) { /* WRITE-ONLY */
1984 ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1985 0, PASS_ONE_BIT,
1986 bit_chk, 0);
1987 } else if (use_read_test) { /* READ-ONLY */
1988 ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group,
1989 NUM_READ_PB_TESTS,
1990 PASS_ONE_BIT, bit_chk,
1991 0, 0);
1992 } else { /* READ-ONLY */
1993 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0,
1994 PASS_ONE_BIT, bit_chk, 0);
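		/*
		 * The write test covers the whole write group; shift the
		 * result so this read group's bits sit in the low positions.
		 */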
1995 *bit_chk = *bit_chk >> (per_dqs *
1996 (read_group - (write_group * ratio)));
1997 ret = (*bit_chk == 0);
1998 }
1999 *sticky_bit_chk = *sticky_bit_chk | *bit_chk;
2000 ret = ret && (*sticky_bit_chk == correct_mask);
2001 debug_cond(DLEVEL == 2,
2002 "%s:%d center(left): dtap=%u => %u == %u && %u",
2003 __func__, __LINE__, d,
2004 *sticky_bit_chk, correct_mask, ret);
2005 return ret;
2006 }
2007
2008 /**
2009 * search_left_edge() - Find left edge of DQ/DQS working phase
2010  * @write: Perform write (Stage 2) or read (Stage 1/3) calibration
2011 * @rank_bgn: Rank number
2012 * @write_group: Write Group
2013 * @read_group: Read Group
2014 * @test_bgn: Rank number to begin the test
2015 * @sticky_bit_chk: Resulting sticky bit mask after the test
2016 * @left_edge: Left edge of the DQ/DQS phase
2017 * @right_edge: Right edge of the DQ/DQS phase
2018 * @use_read_test: Perform read test
2019 *
2020 * Find left edge of DQ/DQS working phase.
2021 */
2022 static void search_left_edge(const int write, const int rank_bgn,
2023 const u32 write_group, const u32 read_group, const u32 test_bgn,
2024 u32 *sticky_bit_chk,
2025 int *left_edge, int *right_edge, const u32 use_read_test)
2026 {
2027 const u32 delay_max = write ? iocfg->io_out1_delay_max :
2028 iocfg->io_in_delay_max;
2029 const u32 dqs_max = write ? iocfg->io_out1_delay_max :
2030 iocfg->dqs_in_delay_max;
2031 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
2032 rwcfg->mem_dq_per_read_dqs;
2033 u32 stop, bit_chk;
2034 int i, d;
2035
2036 for (d = 0; d <= dqs_max; d++) {
2037 if (write)
2038 scc_mgr_apply_group_dq_out1_delay(d);
2039 else
2040 scc_mgr_apply_group_dq_in_delay(test_bgn, d);
2041
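		/* Apply the new delay settings. */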
2042 writel(0, &sdr_scc_mgr->update);
2043
2044 stop = search_stop_check(write, d, rank_bgn, write_group,
2045 read_group, &bit_chk, sticky_bit_chk,
2046 use_read_test);
2047 if (stop == 1)
2048 break;
2049
2050 /* stop != 1 */
2051 for (i = 0; i < per_dqs; i++) {
2052 if (bit_chk & 1) {
2053 /*
2054 * Remember a passing test as
2055 * the left_edge.
2056 */
2057 left_edge[i] = d;
2058 } else {
2059 /*
2060 * If a left edge has not been seen
2061 * yet, then a future passing test
2062 * will mark this edge as the right
2063 * edge.
2064 */
2065 if (left_edge[i] == delay_max + 1)
2066 right_edge[i] = -(d + 1);
2067 }
2068 bit_chk >>= 1;
2069 }
2070 }
2071
2072 /* Reset DQ delay chains to 0 */
2073 if (write)
2074 scc_mgr_apply_group_dq_out1_delay(0);
2075 else
2076 scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
2077
2078 *sticky_bit_chk = 0;
2079 for (i = per_dqs - 1; i >= 0; i--) {
2080 debug_cond(DLEVEL == 2,
2081 "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2082 __func__, __LINE__, i, left_edge[i],
2083 i, right_edge[i]);
2084
2085 /*
2086 * Check for cases where we haven't found the left edge,
2087 	 * which makes our assignment of the right edge invalid.
2088 * Reset it to the illegal value.
2089 */
2090 if ((left_edge[i] == delay_max + 1) &&
2091 (right_edge[i] != delay_max + 1)) {
2092 right_edge[i] = delay_max + 1;
2093 debug_cond(DLEVEL == 2,
2094 "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
2095 __func__, __LINE__, i, right_edge[i]);
2096 }
2097
2098 /*
2099 * Reset sticky bit
2100 * READ: except for bits where we have seen both
2101 * the left and right edge.
2102 * WRITE: except for bits where we have seen the
2103 * left edge.
2104 */
2105 *sticky_bit_chk <<= 1;
2106 if (write) {
2107 if (left_edge[i] != delay_max + 1)
2108 *sticky_bit_chk |= 1;
2109 } else {
2110 if ((left_edge[i] != delay_max + 1) &&
2111 (right_edge[i] != delay_max + 1))
2112 *sticky_bit_chk |= 1;
2113 }
2114 }
2115 }
2116
2117 /**
2118 * search_right_edge() - Find right edge of DQ/DQS working phase
2119  * @write: Perform write (Stage 2) or read (Stage 1/3) calibration
2120 * @rank_bgn: Rank number
2121 * @write_group: Write Group
2122 * @read_group: Read Group
2123 * @start_dqs: DQS start phase
2124 * @start_dqs_en: DQS enable start phase
2125 * @sticky_bit_chk: Resulting sticky bit mask after the test
2126 * @left_edge: Left edge of the DQ/DQS phase
2127 * @right_edge: Right edge of the DQ/DQS phase
2128 * @use_read_test: Perform read test
2129 *
2130 * Find right edge of DQ/DQS working phase.
2131 */
2132 static int search_right_edge(const int write, const int rank_bgn,
2133 const u32 write_group, const u32 read_group,
2134 const int start_dqs, const int start_dqs_en,
2135 u32 *sticky_bit_chk,
2136 int *left_edge, int *right_edge, const u32 use_read_test)
2137 {
2138 const u32 delay_max = write ? iocfg->io_out1_delay_max :
2139 iocfg->io_in_delay_max;
2140 const u32 dqs_max = write ? iocfg->io_out1_delay_max :
2141 iocfg->dqs_in_delay_max;
2142 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
2143 rwcfg->mem_dq_per_read_dqs;
2144 u32 stop, bit_chk;
2145 int i, d;
2146
2147 for (d = 0; d <= dqs_max - start_dqs; d++) {
2148 if (write) { /* WRITE-ONLY */
2149 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2150 d + start_dqs);
2151 } else { /* READ-ONLY */
2152 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
2153 if (iocfg->shift_dqs_en_when_shift_dqs) {
2154 u32 delay = d + start_dqs_en;
2155 if (delay > iocfg->dqs_en_delay_max)
2156 delay = iocfg->dqs_en_delay_max;
2157 scc_mgr_set_dqs_en_delay(read_group, delay);
2158 }
2159 scc_mgr_load_dqs(read_group);
2160 }
2161
2162 writel(0, &sdr_scc_mgr->update);
2163
2164 stop = search_stop_check(write, d, rank_bgn, write_group,
2165 read_group, &bit_chk, sticky_bit_chk,
2166 use_read_test);
2167 if (stop == 1) {
2168 if (write && (d == 0)) { /* WRITE-ONLY */
2169 for (i = 0; i < rwcfg->mem_dq_per_write_dqs;
2170 i++) {
2171 /*
2172 * d = 0 failed, but it passed when
2173 * testing the left edge, so it must be
2174 * marginal, set it to -1
2175 */
2176 if (right_edge[i] == delay_max + 1 &&
2177 left_edge[i] != delay_max + 1)
2178 right_edge[i] = -1;
2179 }
2180 }
2181 break;
2182 }
2183
2184 /* stop != 1 */
2185 for (i = 0; i < per_dqs; i++) {
2186 if (bit_chk & 1) {
2187 /*
2188 * Remember a passing test as
2189 * the right_edge.
2190 */
2191 right_edge[i] = d;
2192 } else {
2193 if (d != 0) {
2194 /*
2195 * If a right edge has not
2196 * been seen yet, then a future
2197 * passing test will mark this
2198 * edge as the left edge.
2199 */
2200 if (right_edge[i] == delay_max + 1)
2201 left_edge[i] = -(d + 1);
2202 } else {
2203 /*
2204 * d = 0 failed, but it passed
2205 * when testing the left edge,
2206 * so it must be marginal, set
2207 * it to -1
2208 */
2209 if (right_edge[i] == delay_max + 1 &&
2210 left_edge[i] != delay_max + 1)
2211 right_edge[i] = -1;
2212 /*
2213 * If a right edge has not been
2214 * seen yet, then a future
2215 * passing test will mark this
2216 * edge as the left edge.
2217 */
2218 else if (right_edge[i] == delay_max + 1)
2219 left_edge[i] = -(d + 1);
2220 }
2221 }
2222
2223 debug_cond(DLEVEL == 2, "%s:%d center[r,d=%u]: ",
2224 __func__, __LINE__, d);
2225 debug_cond(DLEVEL == 2,
2226 "bit_chk_test=%i left_edge[%u]: %d ",
2227 bit_chk & 1, i, left_edge[i]);
2228 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2229 right_edge[i]);
2230 bit_chk >>= 1;
2231 }
2232 }
2233
2234 /* Check that all bits have a window */
2235 for (i = 0; i < per_dqs; i++) {
2236 debug_cond(DLEVEL == 2,
2237 "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
2238 __func__, __LINE__, i, left_edge[i],
2239 i, right_edge[i]);
2240 if ((left_edge[i] == dqs_max + 1) ||
2241 (right_edge[i] == dqs_max + 1))
2242 return i + 1; /* FIXME: If we fail, retval > 0 */
2243 }
2244
2245 return 0;
2246 }
2247
2248 /**
2249 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
2250  * @write: Perform write (Stage 2) or read (Stage 1/3) calibration
2251 * @left_edge: Left edge of the DQ/DQS phase
2252 * @right_edge: Right edge of the DQ/DQS phase
2253 * @mid_min: Best DQ/DQS phase middle setting
2254 *
2255 * Find index and value of the middle of the DQ/DQS working phase.
2256 */
2257 static int get_window_mid_index(const int write, int *left_edge,
2258 int *right_edge, int *mid_min)
2259 {
2260 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
2261 rwcfg->mem_dq_per_read_dqs;
2262 int i, mid, min_index;
2263
2264 /* Find middle of window for each DQ bit */
2265 *mid_min = left_edge[0] - right_edge[0];
2266 min_index = 0;
2267 for (i = 1; i < per_dqs; i++) {
2268 mid = left_edge[i] - right_edge[i];
2269 if (mid < *mid_min) {
2270 *mid_min = mid;
2271 min_index = i;
2272 }
2273 }
2274
2275 /*
2276 * -mid_min/2 represents the amount that we need to move DQS.
2277 * If mid_min is odd and positive we'll need to add one to make
2278 * sure the rounding in further calculations is correct (always
2279 * bias to the right), so just add 1 for all positive values.
2280 */
2281 if (*mid_min > 0)
2282 (*mid_min)++;
2283 *mid_min = *mid_min / 2;
2284
2285 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
2286 __func__, __LINE__, *mid_min, min_index);
2287 return min_index;
2288 }
2289
2290 /**
2291 * center_dq_windows() - Center the DQ/DQS windows
2292  * @write: Perform write (Stage 2) or read (Stage 1/3) calibration
2293 * @left_edge: Left edge of the DQ/DQS phase
2294 * @right_edge: Right edge of the DQ/DQS phase
2295 * @mid_min: Adjusted DQ/DQS phase middle setting
2296 * @orig_mid_min: Original DQ/DQS phase middle setting
2297 * @min_index: DQ/DQS phase middle setting index
2298 * @test_bgn: Rank number to begin the test
2299 * @dq_margin: Amount of shift for the DQ
2300 * @dqs_margin: Amount of shift for the DQS
2301 *
2302 * Align the DQ/DQS windows in each group.
2303 */
2304 static void center_dq_windows(const int write, int *left_edge, int *right_edge,
2305 const int mid_min, const int orig_mid_min,
2306 const int min_index, const int test_bgn,
2307 int *dq_margin, int *dqs_margin)
2308 {
2309 const u32 delay_max = write ? iocfg->io_out1_delay_max :
2310 iocfg->io_in_delay_max;
2311 const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
2312 rwcfg->mem_dq_per_read_dqs;
2313 const u32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
2314 SCC_MGR_IO_IN_DELAY_OFFSET;
2315 const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
2316
2317 u32 temp_dq_io_delay1, temp_dq_io_delay2;
2318 int shift_dq, i, p;
2319
2320 /* Initialize data for export structures */
2321 *dqs_margin = delay_max + 1;
2322 *dq_margin = delay_max + 1;
2323
2324 /* add delay to bring centre of all DQ windows to the same "level" */
2325 for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
2326 /* Use values before divide by 2 to reduce round off error */
2327 shift_dq = (left_edge[i] - right_edge[i] -
2328 (left_edge[min_index] - right_edge[min_index]))/2 +
2329 (orig_mid_min - mid_min);
2330
2331 debug_cond(DLEVEL == 2,
2332 "vfifo_center: before: shift_dq[%u]=%d\n",
2333 i, shift_dq);
2334
2335 temp_dq_io_delay1 = readl(addr + (p << 2));
2336 temp_dq_io_delay2 = readl(addr + (i << 2));
2337
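		/* Clamp shift_dq so the resulting delay setting stays within the valid range. */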
2338 if (shift_dq + temp_dq_io_delay1 > delay_max)
2339 shift_dq = delay_max - temp_dq_io_delay2;
2340 else if (shift_dq + temp_dq_io_delay1 < 0)
2341 shift_dq = -temp_dq_io_delay1;
2342
2343 debug_cond(DLEVEL == 2,
2344 "vfifo_center: after: shift_dq[%u]=%d\n",
2345 i, shift_dq);
2346
2347 if (write)
2348 scc_mgr_set_dq_out1_delay(i,
2349 temp_dq_io_delay1 + shift_dq);
2350 else
2351 scc_mgr_set_dq_in_delay(p,
2352 temp_dq_io_delay1 + shift_dq);
2353
2354 scc_mgr_load_dq(p);
2355
2356 debug_cond(DLEVEL == 2,
2357 "vfifo_center: margin[%u]=[%d,%d]\n", i,
2358 left_edge[i] - shift_dq + (-mid_min),
2359 right_edge[i] + shift_dq - (-mid_min));
2360
2361 /* To determine values for export structures */
2362 if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
2363 *dq_margin = left_edge[i] - shift_dq + (-mid_min);
2364
2365 if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
2366 *dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2367 }
2368 }
2369
2370 /**
2371 * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
2372 * @rank_bgn: Rank number
2373 * @rw_group: Read/Write Group
2374 * @test_bgn: Rank at which the test begins
2375 * @use_read_test: Perform a read test
2376 * @update_fom: Update FOM
2377 *
2378 * Per-bit deskew DQ and centering.
2379 */
2380 static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
2381 const u32 rw_group, const u32 test_bgn,
2382 const int use_read_test, const int update_fom)
2383 {
2384 const u32 addr =
2385 SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
2386 (rw_group << 2);
2387 /*
2388 * Store these as signed since there are comparisons with
2389 * signed numbers.
2390 */
2391 u32 sticky_bit_chk;
2392 int32_t left_edge[rwcfg->mem_dq_per_read_dqs];
2393 int32_t right_edge[rwcfg->mem_dq_per_read_dqs];
2394 int32_t orig_mid_min, mid_min;
2395 int32_t new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en;
2396 int32_t dq_margin, dqs_margin;
2397 int i, min_index;
2398 int ret;
2399
2400 debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);
2401
2402 start_dqs = readl(addr);
2403 if (iocfg->shift_dqs_en_when_shift_dqs)
2404 start_dqs_en = readl(addr - iocfg->dqs_en_delay_offset);
2405
2406 /* set the left and right edge of each bit to an illegal value */
2407 /* use (iocfg->io_in_delay_max + 1) as an illegal value */
2408 sticky_bit_chk = 0;
2409 for (i = 0; i < rwcfg->mem_dq_per_read_dqs; i++) {
2410 left_edge[i] = iocfg->io_in_delay_max + 1;
2411 right_edge[i] = iocfg->io_in_delay_max + 1;
2412 }
2413
2414 /* Search for the left edge of the window for each bit */
2415 search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn,
2416 &sticky_bit_chk,
2417 left_edge, right_edge, use_read_test);
2418
2419
2420 /* Search for the right edge of the window for each bit */
2421 ret = search_right_edge(0, rank_bgn, rw_group, rw_group,
2422 start_dqs, start_dqs_en,
2423 &sticky_bit_chk,
2424 left_edge, right_edge, use_read_test);
2425 if (ret) {
2426 /*
2427 * Restore delay chain settings before letting the loop
2428 		 * in rw_mgr_mem_calibrate_vfifo retry different
2429 		 * DQS/CK relationships.
2430 */
2431 scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
2432 if (iocfg->shift_dqs_en_when_shift_dqs)
2433 scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
2434
2435 scc_mgr_load_dqs(rw_group);
2436 writel(0, &sdr_scc_mgr->update);
2437
2438 debug_cond(DLEVEL == 1,
2439 "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
2440 __func__, __LINE__, i, left_edge[i], right_edge[i]);
2441 if (use_read_test) {
2442 set_failing_group_stage(rw_group *
2443 rwcfg->mem_dq_per_read_dqs + i,
2444 CAL_STAGE_VFIFO,
2445 CAL_SUBSTAGE_VFIFO_CENTER);
2446 } else {
2447 set_failing_group_stage(rw_group *
2448 rwcfg->mem_dq_per_read_dqs + i,
2449 CAL_STAGE_VFIFO_AFTER_WRITES,
2450 CAL_SUBSTAGE_VFIFO_CENTER);
2451 }
2452 return -EIO;
2453 }
2454
2455 min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min);
2456
2457 /* Determine the amount we can change DQS (which is -mid_min) */
2458 orig_mid_min = mid_min;
2459 new_dqs = start_dqs - mid_min;
2460 if (new_dqs > iocfg->dqs_in_delay_max)
2461 new_dqs = iocfg->dqs_in_delay_max;
2462 else if (new_dqs < 0)
2463 new_dqs = 0;
2464
2465 mid_min = start_dqs - new_dqs;
2466 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2467 mid_min, new_dqs);
2468
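	/*
	 * If DQS-enable shifts together with DQS, limit the adjustment so
	 * the new DQS-enable delay stays within [0, dqs_en_delay_max].
	 */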
2469 if (iocfg->shift_dqs_en_when_shift_dqs) {
2470 if (start_dqs_en - mid_min > iocfg->dqs_en_delay_max)
2471 mid_min += start_dqs_en - mid_min -
2472 iocfg->dqs_en_delay_max;
2473 else if (start_dqs_en - mid_min < 0)
2474 mid_min += start_dqs_en - mid_min;
2475 }
2476 new_dqs = start_dqs - mid_min;
2477
2478 debug_cond(DLEVEL == 1,
2479 "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
2480 start_dqs,
2481 iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1,
2482 new_dqs, mid_min);
2483
2484 /* Add delay to bring centre of all DQ windows to the same "level". */
2485 center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min,
2486 min_index, test_bgn, &dq_margin, &dqs_margin);
2487
2488 /* Move DQS-en */
2489 if (iocfg->shift_dqs_en_when_shift_dqs) {
2490 final_dqs_en = start_dqs_en - mid_min;
2491 scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
2492 scc_mgr_load_dqs(rw_group);
2493 }
2494
2495 /* Move DQS */
2496 scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
2497 scc_mgr_load_dqs(rw_group);
2498 debug_cond(DLEVEL == 2,
2499 "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
2500 __func__, __LINE__, dq_margin, dqs_margin);
2501
2502 /*
2503 * Do not remove this line as it makes sure all of our decisions
2504 * have been applied. Apply the update bit.
2505 */
2506 writel(0, &sdr_scc_mgr->update);
2507
2508 if ((dq_margin < 0) || (dqs_margin < 0))
2509 return -EINVAL;
2510
2511 return 0;
2512 }
2513
2514 /**
2515 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
2516 * @rw_group: Read/Write Group
2517 * @phase: DQ/DQS phase
2518 *
2519  * Because initially no communication can be reliably performed with the memory
2520 * device, the sequencer uses a guaranteed write mechanism to write data into
2521 * the memory device.
2522 */
2523 static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
2524 const u32 phase)
2525 {
2526 int ret;
2527
2528 /* Set a particular DQ/DQS phase. */
2529 scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);
2530
2531 debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
2532 __func__, __LINE__, rw_group, phase);
2533
2534 /*
2535 * Altera EMI_RM 2015.05.04 :: Figure 1-25
2536 * Load up the patterns used by read calibration using the
2537 * current DQDQS phase.
2538 */
2539 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2540
2541 if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
2542 return 0;
2543
2544 /*
2545 * Altera EMI_RM 2015.05.04 :: Figure 1-26
2546 * Back-to-Back reads of the patterns used for calibration.
2547 */
2548 ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
2549 if (ret)
2550 debug_cond(DLEVEL == 1,
2551 "%s:%d Guaranteed read test failed: g=%u p=%u\n",
2552 __func__, __LINE__, rw_group, phase);
2553 return ret;
2554 }
2555
2556 /**
2557 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
2558 * @rw_group: Read/Write Group
2559 * @test_bgn: Rank at which the test begins
2560 *
2561 * DQS enable calibration ensures reliable capture of the DQ signal without
2562 * glitches on the DQS line.
2563 */
2564 static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
2565 const u32 test_bgn)
2566 {
2567 /*
2568 * Altera EMI_RM 2015.05.04 :: Figure 1-27
2569 	 * DQS and DQS Enable Signal Relationships.
2570 */
2571
2572 	/* We start at zero, so we have one less DQ to divide among. */
2573 const u32 delay_step = iocfg->io_in_delay_max /
2574 (rwcfg->mem_dq_per_read_dqs - 1);
2575 int ret;
2576 u32 i, p, d, r;
2577
2578 debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
2579
2580 /* Try different dq_in_delays since the DQ path is shorter than DQS. */
2581 for (r = 0; r < rwcfg->mem_number_of_ranks;
2582 r += NUM_RANKS_PER_SHADOW_REG) {
2583 for (i = 0, p = test_bgn, d = 0;
2584 i < rwcfg->mem_dq_per_read_dqs;
2585 i++, p++, d += delay_step) {
2586 debug_cond(DLEVEL == 1,
2587 "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
2588 __func__, __LINE__, rw_group, r, i, p, d);
2589
2590 scc_mgr_set_dq_in_delay(p, d);
2591 scc_mgr_load_dq(p);
2592 }
2593
2594 writel(0, &sdr_scc_mgr->update);
2595 }
2596
2597 /*
2598 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
2599 * dq_in_delay values
2600 */
2601 ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);
2602
2603 debug_cond(DLEVEL == 1,
2604 		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
2605 __func__, __LINE__, rw_group, !ret);
2606
2607 for (r = 0; r < rwcfg->mem_number_of_ranks;
2608 r += NUM_RANKS_PER_SHADOW_REG) {
2609 scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
2610 writel(0, &sdr_scc_mgr->update);
2611 }
2612
2613 return ret;
2614 }
2615
2616 /**
2617 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
2618 * @rw_group: Read/Write Group
2619 * @test_bgn: Rank at which the test begins
2620 * @use_read_test: Perform a read test
2621 * @update_fom: Update FOM
2622 *
2623  * The centering DQ/DQS stage attempts to align the DQ and DQS signals on reads
2624 * within a group.
2625 */
2626 static int
2627 rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
2628 const int use_read_test,
2629 const int update_fom)
2630
2631 {
2632 int ret, grp_calibrated;
2633 u32 rank_bgn, sr;
2634
2635 /*
2636 * Altera EMI_RM 2015.05.04 :: Figure 1-28
2637 * Read per-bit deskew can be done on a per shadow register basis.
2638 */
2639 grp_calibrated = 1;
2640 for (rank_bgn = 0, sr = 0;
2641 rank_bgn < rwcfg->mem_number_of_ranks;
2642 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
2643 ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
2644 test_bgn,
2645 use_read_test,
2646 update_fom);
2647 if (!ret)
2648 continue;
2649
2650 grp_calibrated = 0;
2651 }
2652
2653 if (!grp_calibrated)
2654 return -EIO;
2655
2656 return 0;
2657 }
2658
2659 /**
2660 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2661 * @rw_group: Read/Write Group
2662 * @test_bgn: Rank at which the test begins
2663 *
2664 * Stage 1: Calibrate the read valid prediction FIFO.
2665 *
2666 * This function implements UniPHY calibration Stage 1, as explained in
2667 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2668 *
2669 * - read valid prediction will consist of finding:
2670 * - DQS enable phase and DQS enable delay (DQS Enable Calibration)
2671 * - DQS input phase and DQS input delay (DQ/DQS Centering)
2672 * - we also do a per-bit deskew on the DQ lines.
2673 */
2674 static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
2675 {
2676 u32 p, d;
2677 u32 dtaps_per_ptap;
2678 u32 failed_substage;
2679
2680 int ret;
2681
2682 debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
2683
2684 /* Update info for sims */
2685 reg_file_set_group(rw_group);
2686 reg_file_set_stage(CAL_STAGE_VFIFO);
2687 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2688
2689 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2690
2691 /* USER Determine number of delay taps for each phase tap. */
2692 dtaps_per_ptap = DIV_ROUND_UP(iocfg->delay_per_opa_tap,
2693 iocfg->delay_per_dqs_en_dchain_tap) - 1;
2694
2695 for (d = 0; d <= dtaps_per_ptap; d += 2) {
2696 /*
2697 		 * In RLDRAMX we may be messing up the delay of pins in
2698 		 * the same write group but outside of the current read
2699 		 * group, but that's ok because we haven't calibrated the
2700 		 * output side yet.
2701 */
2702 if (d > 0) {
2703 scc_mgr_apply_group_all_out_delay_add_all_ranks(
2704 rw_group, d);
2705 }
2706
2707 for (p = 0; p <= iocfg->dqdqs_out_phase_max; p++) {
2708 /* 1) Guaranteed Write */
2709 ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
2710 if (ret)
2711 break;
2712
2713 /* 2) DQS Enable Calibration */
2714 ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
2715 test_bgn);
2716 if (ret) {
2717 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2718 continue;
2719 }
2720
2721 /* 3) Centering DQ/DQS */
2722 /*
2723 * If doing read after write calibration, do not update
2724 * FOM now. Do it then.
2725 */
2726 ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
2727 test_bgn, 1, 0);
2728 if (ret) {
2729 failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
2730 continue;
2731 }
2732
2733 /* All done. */
2734 goto cal_done_ok;
2735 }
2736 }
2737
2738 /* Calibration Stage 1 failed. */
2739 set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
2740 return 0;
2741
2742 /* Calibration Stage 1 completed OK. */
2743 cal_done_ok:
2744 /*
2745 * Reset the delay chains back to zero if they have moved > 1
2746 * (check for > 1 because loop will increase d even when pass in
2747 * first case).
2748 */
2749 if (d > 2)
2750 scc_mgr_zero_group(rw_group, 1);
2751
2752 return 1;
2753 }
2754
2755 /**
2756 * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
2757 * @rw_group: Read/Write Group
2758 * @test_bgn: Rank at which the test begins
2759 *
2760 * Stage 3: DQ/DQS Centering.
2761 *
2762 * This function implements UniPHY calibration Stage 3, as explained in
2763 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2764 */
2765 static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group,
2766 const u32 test_bgn)
2767 {
2768 int ret;
2769
2770 debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
2771
2772 /* Update info for sims. */
2773 reg_file_set_group(rw_group);
2774 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2775 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2776
2777 ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1);
2778 if (ret)
2779 set_failing_group_stage(rw_group,
2780 CAL_STAGE_VFIFO_AFTER_WRITES,
2781 CAL_SUBSTAGE_VFIFO_CENTER);
2782 return ret;
2783 }
2784
2785 /**
2786 * rw_mgr_mem_calibrate_lfifo() - Minimize latency
2787 *
2788 * Stage 4: Minimize latency.
2789 *
2790 * This function implements UniPHY calibration Stage 4, as explained in
2791 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2792 * Calibrate LFIFO to find smallest read latency.
2793 */
2794 static u32 rw_mgr_mem_calibrate_lfifo(void)
2795 {
2796 int found_one = 0;
2797
2798 debug("%s:%d\n", __func__, __LINE__);
2799
2800 /* Update info for sims. */
2801 reg_file_set_stage(CAL_STAGE_LFIFO);
2802 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2803
2804 /* Load up the patterns used by read calibration for all ranks */
2805 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2806
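	/*
	 * Sweep the read latency downward from the initial (high) value
	 * until the read test starts failing.
	 */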
2807 do {
2808 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2809 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
2810 __func__, __LINE__, gbl->curr_read_lat);
2811
2812 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS,
2813 PASS_ALL_BITS, 1))
2814 break;
2815
2816 found_one = 1;
2817 /*
2818 * Reduce read latency and see if things are
2819 * working correctly.
2820 */
2821 gbl->curr_read_lat--;
2822 } while (gbl->curr_read_lat > 0);
2823
2824 /* Reset the fifos to get pointers to known state. */
2825 writel(0, &phy_mgr_cmd->fifo_reset);
2826
2827 if (found_one) {
2828 /* Add a fudge factor to the read latency that was determined */
2829 gbl->curr_read_lat += 2;
2830 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2831 debug_cond(DLEVEL == 2,
2832 "%s:%d lfifo: success: using read_lat=%u\n",
2833 __func__, __LINE__, gbl->curr_read_lat);
2834 } else {
2835 set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2836 CAL_SUBSTAGE_READ_LATENCY);
2837
2838 debug_cond(DLEVEL == 2,
2839 "%s:%d lfifo: failed at initial read_lat=%u\n",
2840 __func__, __LINE__, gbl->curr_read_lat);
2841 }
2842
2843 return found_one;
2844 }
2845
2846 /**
2847  * search_window() - Search for the window (or part of it) with DM/DQS shift
2848 * @search_dm: If 1, search for the DM shift, if 0, search for DQS shift
2849 * @rank_bgn: Rank number
2850 * @write_group: Write Group
2851 * @bgn_curr: Current window begin
2852 * @end_curr: Current window end
2853 * @bgn_best: Current best window begin
2854 * @end_best: Current best window end
2855 * @win_best: Size of the best window
2856 * @new_dqs: New DQS value (only applicable if search_dm = 0).
2857 *
2858  * Search for the window (or part of it) with DM/DQS shift.
2859 */
2860 static void search_window(const int search_dm,
2861 const u32 rank_bgn, const u32 write_group,
2862 int *bgn_curr, int *end_curr, int *bgn_best,
2863 int *end_best, int *win_best, int new_dqs)
2864 {
2865 u32 bit_chk;
2866 const int max = iocfg->io_out1_delay_max - new_dqs;
2867 int d, di;
2868
2869 	/* Search for the window (or part of it) with the DM/DQS shift. */
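	/*
	 * DM delays are recorded as negative values so the caller can
	 * combine the DM part and the DQS part of the window on one axis.
	 */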
2870 for (di = max; di >= 0; di -= DELTA_D) {
2871 if (search_dm) {
2872 d = di;
2873 scc_mgr_apply_group_dm_out1_delay(d);
2874 } else {
2875 /* For DQS, we go from 0...max */
2876 d = max - di;
2877 /*
2878 			 * Note: This only shifts DQS, so we may be limiting
2879 			 * ourselves to the width of the DQ window unnecessarily.
2880 */
2881 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2882 d + new_dqs);
2883 }
2884
2885 writel(0, &sdr_scc_mgr->update);
2886
2887 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2888 PASS_ALL_BITS, &bit_chk,
2889 0)) {
2890 /* Set current end of the window. */
2891 *end_curr = search_dm ? -d : d;
2892
2893 /*
2894 			 * If a starting edge of our window has not been seen
2895 			 * yet, this is the current start of the window.
2896 */
2897 if (*bgn_curr == iocfg->io_out1_delay_max + 1)
2898 *bgn_curr = search_dm ? -d : d;
2899
2900 /*
2901 * If current window is bigger than best seen.
2902 * Set best seen to be current window.
2903 */
2904 if ((*end_curr - *bgn_curr + 1) > *win_best) {
2905 *win_best = *end_curr - *bgn_curr + 1;
2906 *bgn_best = *bgn_curr;
2907 *end_best = *end_curr;
2908 }
2909 } else {
2910 /* We just saw a failing test. Reset temp edge. */
2911 *bgn_curr = iocfg->io_out1_delay_max + 1;
2912 *end_curr = iocfg->io_out1_delay_max + 1;
2913
2914 /* Early exit is only applicable to DQS. */
2915 if (search_dm)
2916 continue;
2917
2918 /*
2919 * Early exit optimization: if the remaining delay
2920 			 * chain space is less than the largest window already
2921 			 * seen, we can exit.
2922 */
2923 if (*win_best - 1 > iocfg->io_out1_delay_max - new_dqs - d)
2924 break;
2925 }
2926 }
2927 }
2928
2929 /**
2930 * rw_mgr_mem_calibrate_writes_center() - Center all windows
2931 * @rank_bgn: Rank number
2932 * @write_group: Write group
2933 * @test_bgn: Rank at which the test begins
2934 *
2935 * Center all windows. Do per-bit-deskew to possibly increase size of
2936 * certain windows.
2937 */
2938 static int
2939 rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
2940 const u32 test_bgn)
2941 {
2942 int i;
2943 u32 sticky_bit_chk;
2944 u32 min_index;
2945 int left_edge[rwcfg->mem_dq_per_write_dqs];
2946 int right_edge[rwcfg->mem_dq_per_write_dqs];
2947 int mid;
2948 int mid_min, orig_mid_min;
2949 int new_dqs, start_dqs;
2950 int dq_margin, dqs_margin, dm_margin;
2951 int bgn_curr = iocfg->io_out1_delay_max + 1;
2952 int end_curr = iocfg->io_out1_delay_max + 1;
2953 int bgn_best = iocfg->io_out1_delay_max + 1;
2954 int end_best = iocfg->io_out1_delay_max + 1;
2955 int win_best = 0;
2956
2957 int ret;
2958
2959 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
2960
2961 dm_margin = 0;
2962
2963 start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
2964 SCC_MGR_IO_OUT1_DELAY_OFFSET) +
2965 (rwcfg->mem_dq_per_write_dqs << 2));
2966
2967 /* Per-bit deskew. */
2968
2969 /*
2970 * Set the left and right edge of each bit to an illegal value.
2971 * Use (iocfg->io_out1_delay_max + 1) as an illegal value.
2972 */
2973 sticky_bit_chk = 0;
2974 for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
2975 left_edge[i] = iocfg->io_out1_delay_max + 1;
2976 right_edge[i] = iocfg->io_out1_delay_max + 1;
2977 }
2978
2979 /* Search for the left edge of the window for each bit. */
2980 search_left_edge(1, rank_bgn, write_group, 0, test_bgn,
2981 &sticky_bit_chk,
2982 left_edge, right_edge, 0);
2983
2984 /* Search for the right edge of the window for each bit. */
2985 ret = search_right_edge(1, rank_bgn, write_group, 0,
2986 start_dqs, 0,
2987 &sticky_bit_chk,
2988 left_edge, right_edge, 0);
2989 if (ret) {
2990 set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES,
2991 CAL_SUBSTAGE_WRITES_CENTER);
2992 return -EINVAL;
2993 }
2994
2995 min_index = get_window_mid_index(1, left_edge, right_edge, &mid_min);
2996
2997 /* Determine the amount we can change DQS (which is -mid_min). */
2998 orig_mid_min = mid_min;
2999 new_dqs = start_dqs;
3000 mid_min = 0;
3001 debug_cond(DLEVEL == 1,
3002 "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
3003 __func__, __LINE__, start_dqs, new_dqs, mid_min);
3004
3005 /* Add delay to bring centre of all DQ windows to the same "level". */
3006 center_dq_windows(1, left_edge, right_edge, mid_min, orig_mid_min,
3007 min_index, 0, &dq_margin, &dqs_margin);
3008
3009 /* Move DQS */
3010 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3011 writel(0, &sdr_scc_mgr->update);
3012
3013 /* Centre DM */
3014 debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
3015
3016 /*
3017 * Set the left and right edge of each bit to an illegal value.
3018 * Use (iocfg->io_out1_delay_max + 1) as an illegal value.
3019 */
3020 left_edge[0] = iocfg->io_out1_delay_max + 1;
3021 right_edge[0] = iocfg->io_out1_delay_max + 1;
3022
3023 	/* Search for the window (or part of it) with the DM shift. */
3024 search_window(1, rank_bgn, write_group, &bgn_curr, &end_curr,
3025 &bgn_best, &end_best, &win_best, 0);
3026
3027 /* Reset DM delay chains to 0. */
3028 scc_mgr_apply_group_dm_out1_delay(0);
3029
3030 /*
3031 	 * Check to see if the current window nudges up against 0 delay.
3032 * If so we need to continue the search by shifting DQS otherwise DQS
3033 * search begins as a new search.
3034 */
3035 if (end_curr != 0) {
3036 bgn_curr = iocfg->io_out1_delay_max + 1;
3037 end_curr = iocfg->io_out1_delay_max + 1;
3038 }
3039
3040 	/* Search for the window (or part of it) with DQS shifts. */
3041 search_window(0, rank_bgn, write_group, &bgn_curr, &end_curr,
3042 &bgn_best, &end_best, &win_best, new_dqs);
3043
3044 /* Assign left and right edge for cal and reporting. */
3045 left_edge[0] = -1 * bgn_best;
3046 right_edge[0] = end_best;
3047
3048 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n",
3049 __func__, __LINE__, left_edge[0], right_edge[0]);
3050
3051 /* Move DQS (back to orig). */
3052 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3053
3054 /* Move DM */
3055
3056 /* Find middle of window for the DM bit. */
3057 mid = (left_edge[0] - right_edge[0]) / 2;
3058
3059 /* Only move right, since we are not moving DQS/DQ. */
3060 if (mid < 0)
3061 mid = 0;
3062
3063 	/* dm_margin should fail if we never find a window. */
3064 if (win_best == 0)
3065 dm_margin = -1;
3066 else
3067 dm_margin = left_edge[0] - mid;
3068
3069 scc_mgr_apply_group_dm_out1_delay(mid);
3070 writel(0, &sdr_scc_mgr->update);
3071
3072 debug_cond(DLEVEL == 2,
3073 "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3074 __func__, __LINE__, left_edge[0], right_edge[0],
3075 mid, dm_margin);
3076 /* Export values. */
3077 gbl->fom_out += dq_margin + dqs_margin;
3078
3079 debug_cond(DLEVEL == 2,
3080 "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3081 __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
3082
3083 /*
3084 * Do not remove this line as it makes sure all of our
3085 * decisions have been applied.
3086 */
3087 writel(0, &sdr_scc_mgr->update);
3088
3089 if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
3090 return -EINVAL;
3091
3092 return 0;
3093 }
3094
3095 /**
3096 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
3097 * @rank_bgn: Rank number
3098 * @group: Read/Write Group
3099 * @test_bgn: Rank at which the test begins
3100 *
3101 * Stage 2: Write Calibration Part One.
3102 *
3103 * This function implements UniPHY calibration Stage 2, as explained in
3104 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
3105 */
3106 static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group,
3107 const u32 test_bgn)
3108 {
3109 int ret;
3110
3111 /* Update info for sims */
3112 debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
3113
3114 reg_file_set_group(group);
3115 reg_file_set_stage(CAL_STAGE_WRITES);
3116 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3117
3118 ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn);
3119 if (ret)
3120 set_failing_group_stage(group, CAL_STAGE_WRITES,
3121 CAL_SUBSTAGE_WRITES_CENTER);
3122
3123 return ret;
3124 }
3125
3126 /**
3127 * mem_precharge_and_activate() - Precharge all banks and activate
3128 *
3129 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3130 */
3131 static void mem_precharge_and_activate(void)
3132 {
3133 int r;
3134
3135 for (r = 0; r < rwcfg->mem_number_of_ranks; r++) {
3136 /* Set rank. */
3137 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
3138
3139 /* Precharge all banks. */
3140 writel(rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3141 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3142
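		/*
		 * Program the RW manager wait counters and jump addresses
		 * used by the activate sequence below.
		 */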
3143 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
3144 writel(rwcfg->activate_0_and_1_wait1,
3145 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
3146
3147 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
3148 writel(rwcfg->activate_0_and_1_wait2,
3149 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
3150
3151 /* Activate rows. */
3152 writel(rwcfg->activate_0_and_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3153 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3154 }
3155 }
3156
3157 /**
3158 * mem_init_latency() - Configure memory RLAT and WLAT settings
3159 *
3160 * Configure memory RLAT and WLAT parameters.
3161 */
3162 static void mem_init_latency(void)
3163 {
3164 /*
3165 * For AV/CV, LFIFO is hardened and always runs at full rate
3166 * so max latency in AFI clocks, used here, is correspondingly
3167 * smaller.
3168 */
3169 const u32 max_latency = (1 << misccfg->max_latency_count_width) - 1;
3170 u32 rlat, wlat;
3171
3172 debug("%s:%d\n", __func__, __LINE__);
3173
3174 /*
3175 * Read in write latency.
3176 * WL for Hard PHY does not include additive latency.
3177 */
3178 wlat = readl(&data_mgr->t_wl_add);
3179 wlat += readl(&data_mgr->mem_t_add);
3180
3181 gbl->rw_wl_nop_cycles = wlat - 1;
3182
3183 	/* Read in read latency. */
3184 rlat = readl(&data_mgr->t_rl_add);
3185
3186 /* Set a pretty high read latency initially. */
3187 gbl->curr_read_lat = rlat + 16;
3188 if (gbl->curr_read_lat > max_latency)
3189 gbl->curr_read_lat = max_latency;
3190
3191 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3192
3193 /* Advertise write latency. */
3194 writel(wlat, &phy_mgr_cfg->afi_wlat);
3195 }
3196
3197 /**
3198  * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3199 *
3200 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3201 */
3202 static void mem_skip_calibrate(void)
3203 {
3204 u32 vfifo_offset;
3205 u32 i, j, r;
3206
3207 debug("%s:%d\n", __func__, __LINE__);
3208 /* Need to update every shadow register set used by the interface */
3209 for (r = 0; r < rwcfg->mem_number_of_ranks;
3210 r += NUM_RANKS_PER_SHADOW_REG) {
3211 /*
3212 * Set output phase alignment settings appropriate for
3213 * skip calibration.
3214 */
3215 for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
3216 scc_mgr_set_dqs_en_phase(i, 0);
3217 if (iocfg->dll_chain_length == 6)
3218 scc_mgr_set_dqdqs_output_phase(i, 6);
3219 else
3220 scc_mgr_set_dqdqs_output_phase(i, 7);
3221 /*
3222 * Case:33398
3223 *
3224 * Write data arrives to the I/O two cycles before write
3225 * latency is reached (720 deg).
3226 * -> due to bit-slip in a/c bus
3227 * -> to allow board skew where dqs is longer than ck
3228 * -> how often can this happen!?
3229 * -> can claim back some ptaps for high freq
3230 * support if we can relax this, but i digress...
3231 *
3232 * The write_clk leads mem_ck by 90 deg
3233 * The minimum ptap of the OPA is 180 deg
3234 			 * Each ptap has (360 / iocfg->dll_chain_length) deg of delay
3235 * The write_clk is always delayed by 2 ptaps
3236 *
3237 * Hence, to make DQS aligned to CK, we need to delay
3238 * DQS by:
3239 * (720 - 90 - 180 - 2) *
3240 * (360 / iocfg->dll_chain_length)
3241 *
3242 * Dividing the above by (360 / iocfg->dll_chain_length)
3243 			 * gives us the number of ptaps, which simplifies to:
3244 *
3245 * (1.25 * iocfg->dll_chain_length - 2)
3246 */
3247 scc_mgr_set_dqdqs_output_phase(i,
3248 ((125 * iocfg->dll_chain_length) / 100) - 2);
3249 }
3250 writel(0xff, &sdr_scc_mgr->dqs_ena);
3251 writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3252
3253 for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) {
3254 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3255 SCC_MGR_GROUP_COUNTER_OFFSET);
3256 }
3257 writel(0xff, &sdr_scc_mgr->dq_ena);
3258 writel(0xff, &sdr_scc_mgr->dm_ena);
3259 writel(0, &sdr_scc_mgr->update);
3260 }
3261
3262 /* Compensate for simulation model behaviour */
3263 for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
3264 scc_mgr_set_dqs_bus_in_delay(i, 10);
3265 scc_mgr_load_dqs(i);
3266 }
3267 writel(0, &sdr_scc_mgr->update);
3268
3269 /*
3270 	 * Arria V has hard FIFOs that can only be initialized by incrementing
3271 	 * them in the sequencer.
3272 */
3273 vfifo_offset = misccfg->calib_vfifo_offset;
3274 for (j = 0; j < vfifo_offset; j++)
3275 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
3276 writel(0, &phy_mgr_cmd->fifo_reset);
3277
3278 /*
3279 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
3280 * setting from generation-time constant.
3281 */
3282 gbl->curr_read_lat = misccfg->calib_lfifo_offset;
3283 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3284 }
3285
3286 /**
3287 * mem_calibrate() - Memory calibration entry point.
3288 *
3289 * Perform memory calibration.
3290 */
3291 static u32 mem_calibrate(void)
3292 {
3293 u32 i;
3294 u32 rank_bgn, sr;
3295 u32 write_group, write_test_bgn;
3296 u32 read_group, read_test_bgn;
3297 u32 run_groups, current_run;
3298 u32 failing_groups = 0;
3299 u32 group_failed = 0;
3300
3301 const u32 rwdqs_ratio = rwcfg->mem_if_read_dqs_width /
3302 rwcfg->mem_if_write_dqs_width;
3303
3304 debug("%s:%d\n", __func__, __LINE__);
3305
3306 /* Initialize the data settings */
3307 gbl->error_substage = CAL_SUBSTAGE_NIL;
3308 gbl->error_stage = CAL_STAGE_NIL;
3309 gbl->error_group = 0xff;
3310 gbl->fom_in = 0;
3311 gbl->fom_out = 0;
3312
3313 /* Initialize WLAT and RLAT. */
3314 mem_init_latency();
3315
3316 /* Initialize bit slips. */
3317 mem_precharge_and_activate();
3318
3319 for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
3320 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3321 SCC_MGR_GROUP_COUNTER_OFFSET);
3322 /* Only needed once to set all groups, pins, DQ, DQS, DM. */
3323 if (i == 0)
3324 scc_mgr_set_hhp_extras();
3325
3326 scc_set_bypass_mode(i);
3327 }
3328
3329 /* Calibration is skipped. */
3330 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3331 /*
3332 * Set VFIFO and LFIFO to instant-on settings in skip
3333 * calibration mode.
3334 */
3335 mem_skip_calibrate();
3336
3337 /*
3338 * Do not remove this line as it makes sure all of our
3339 * decisions have been applied.
3340 */
3341 writel(0, &sdr_scc_mgr->update);
3342 return 1;
3343 }
3344
3345 /* Calibration is not skipped. */
3346 for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3347 /*
3348 * Zero all delay chain/phase settings for all
3349 * groups and all shadow register sets.
3350 */
3351 scc_mgr_zero_all();
3352
3353 run_groups = ~0;
3354
3355 for (write_group = 0, write_test_bgn = 0; write_group
3356 < rwcfg->mem_if_write_dqs_width; write_group++,
3357 write_test_bgn += rwcfg->mem_dq_per_write_dqs) {
3358 /* Initialize the group failure */
3359 group_failed = 0;
3360
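			/*
			 * Skip this write group if it is not selected in the
			 * run_groups bitmask.
			 */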
3361 current_run = run_groups & ((1 <<
3362 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3363 run_groups = run_groups >>
3364 RW_MGR_NUM_DQS_PER_WRITE_GROUP;
3365
3366 if (current_run == 0)
3367 continue;
3368
3369 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3370 SCC_MGR_GROUP_COUNTER_OFFSET);
3371 scc_mgr_zero_group(write_group, 0);
3372
3373 for (read_group = write_group * rwdqs_ratio,
3374 read_test_bgn = 0;
3375 read_group < (write_group + 1) * rwdqs_ratio;
3376 read_group++,
3377 read_test_bgn += rwcfg->mem_dq_per_read_dqs) {
3378 if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3379 continue;
3380
3381 /* Calibrate the VFIFO */
3382 if (rw_mgr_mem_calibrate_vfifo(read_group,
3383 read_test_bgn))
3384 continue;
3385
3386 if (!(gbl->phy_debug_mode_flags &
3387 PHY_DEBUG_SWEEP_ALL_GROUPS))
3388 return 0;
3389
3390 /* The group failed, we're done. */
3391 goto grp_failed;
3392 }
3393
3394 /* Calibrate the output side */
3395 for (rank_bgn = 0, sr = 0;
3396 rank_bgn < rwcfg->mem_number_of_ranks;
3397 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3398 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3399 continue;
3400
3401 /* Not needed in quick mode! */
3402 if (STATIC_CALIB_STEPS &
3403 CALIB_SKIP_DELAY_SWEEPS)
3404 continue;
3405
3406 /* Calibrate WRITEs */
3407 if (!rw_mgr_mem_calibrate_writes(rank_bgn,
3408 write_group,
3409 write_test_bgn))
3410 continue;
3411
3412 group_failed = 1;
3413 if (!(gbl->phy_debug_mode_flags &
3414 PHY_DEBUG_SWEEP_ALL_GROUPS))
3415 return 0;
3416 }
3417
3418 /* Some group failed, we're done. */
3419 if (group_failed)
3420 goto grp_failed;
3421
3422 for (read_group = write_group * rwdqs_ratio,
3423 read_test_bgn = 0;
3424 read_group < (write_group + 1) * rwdqs_ratio;
3425 read_group++,
3426 read_test_bgn += rwcfg->mem_dq_per_read_dqs) {
3427 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3428 continue;
3429
3430 if (!rw_mgr_mem_calibrate_vfifo_end(read_group,
3431 read_test_bgn))
3432 continue;
3433
3434 if (!(gbl->phy_debug_mode_flags &
3435 PHY_DEBUG_SWEEP_ALL_GROUPS))
3436 return 0;
3437
3438 /* The group failed, we're done. */
3439 goto grp_failed;
3440 }
3441
3442 /* No group failed, continue as usual. */
3443 continue;
3444
3445 grp_failed: /* A group failed, increment the counter. */
3446 failing_groups++;
3447 }
3448
3449 /*
3450 * USER If there are any failing groups then report
3451 * the failure.
3452 */
3453 if (failing_groups != 0)
3454 return 0;
3455
3456 if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3457 continue;
3458
3459 /* Calibrate the LFIFO */
3460 if (!rw_mgr_mem_calibrate_lfifo())
3461 return 0;
3462 }
3463
3464 /*
3465 * Do not remove this line as it makes sure all of our decisions
3466 * have been applied.
3467 */
3468 writel(0, &sdr_scc_mgr->update);
3469 return 1;
3470 }
3471
3472 /**
3473 * run_mem_calibrate() - Perform memory calibration
3474 *
3475 * This function triggers the entire memory calibration procedure.
3476 */
3477 static int run_mem_calibrate(void)
3478 {
3479 int pass;
3480 u32 ctrl_cfg;
3481
3482 debug("%s:%d\n", __func__, __LINE__);
3483
3484 /* Reset pass/fail status shown on afi_cal_success/fail */
3485 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3486
3487 /* Stop tracking manager. */
3488 ctrl_cfg = readl(&sdr_ctrl->ctrl_cfg);
3489 writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK,
3490 &sdr_ctrl->ctrl_cfg);
3491
3492 phy_mgr_initialize();
3493 rw_mgr_mem_initialize();
3494
3495 /* Perform the actual memory calibration. */
3496 pass = mem_calibrate();
3497
3498 mem_precharge_and_activate();
3499 writel(0, &phy_mgr_cmd->fifo_reset);
3500
3501 /* Handoff. */
3502 rw_mgr_mem_handoff();
3503 /*
3504 * In Hard PHY this is a 2-bit control:
3505 * 0: AFI Mux Select
3506 * 1: DDIO Mux Select
3507 */
3508 writel(0x2, &phy_mgr_cfg->mux_sel);
3509
3510 /* Start tracking manager. */
3511 writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg);
3512
3513 return pass;
3514 }
3515
3516 /**
3517 * debug_mem_calibrate() - Report result of memory calibration
3518 * @pass: Value indicating whether calibration passed or failed
3519 *
3520 * This function reports the results of the memory calibration
3521 * and writes debug information into the register file.
3522 */
3523 static void debug_mem_calibrate(int pass)
3524 {
3525 u32 debug_info;
3526
3527 if (pass) {
3528 printf("%s: CALIBRATION PASSED\n", __FILE__);
3529
3530 gbl->fom_in /= 2;
3531 gbl->fom_out /= 2;
3532
3533 if (gbl->fom_in > 0xff)
3534 gbl->fom_in = 0xff;
3535
3536 if (gbl->fom_out > 0xff)
3537 gbl->fom_out = 0xff;
3538
3539 /* Update the FOM in the register file */
3540 debug_info = gbl->fom_in;
3541 debug_info |= gbl->fom_out << 8;
3542 writel(debug_info, &sdr_reg_file->fom);
3543
3544 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3545 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3546 } else {
3547 printf("%s: CALIBRATION FAILED\n", __FILE__);
3548
3549 debug_info = gbl->error_stage;
3550 debug_info |= gbl->error_substage << 8;
3551 debug_info |= gbl->error_group << 16;
3552
3553 writel(debug_info, &sdr_reg_file->failing_stage);
3554 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3555 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3556
3557 /* Update the failing group/stage in the register file */
3558 debug_info = gbl->error_stage;
3559 debug_info |= gbl->error_substage << 8;
3560 debug_info |= gbl->error_group << 16;
3561 writel(debug_info, &sdr_reg_file->failing_stage);
3562 }
3563
3564 printf("%s: Calibration complete\n", __FILE__);
3565 }
3566
3567 /**
3568 * hc_initialize_rom_data() - Initialize ROM data
3569 *
3570 * Initialize ROM data.
3571 */
3572 static void hc_initialize_rom_data(void)
3573 {
3574 unsigned int nelem = 0;
3575 const u32 *rom_init;
3576 u32 i, addr;
3577
3578 socfpga_get_seq_inst_init(&rom_init, &nelem);
3579 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
3580 for (i = 0; i < nelem; i++)
3581 writel(rom_init[i], addr + (i << 2));
3582
3583 socfpga_get_seq_ac_init(&rom_init, &nelem);
3584 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
3585 for (i = 0; i < nelem; i++)
3586 writel(rom_init[i], addr + (i << 2));
3587 }
3588
3589 /**
3590 * initialize_reg_file() - Initialize SDR register file
3591 *
3592 * Initialize SDR register file.
3593 */
3594 static void initialize_reg_file(void)
3595 {
3596 /* Initialize the register file with the correct data */
3597 writel(misccfg->reg_file_init_seq_signature, &sdr_reg_file->signature);
3598 writel(0, &sdr_reg_file->debug_data_addr);
3599 writel(0, &sdr_reg_file->cur_stage);
3600 writel(0, &sdr_reg_file->fom);
3601 writel(0, &sdr_reg_file->failing_stage);
3602 writel(0, &sdr_reg_file->debug1);
3603 writel(0, &sdr_reg_file->debug2);
3604 }
3605
3606 /**
3607 * initialize_hps_phy() - Initialize HPS PHY
3608 *
3609 * Initialize HPS PHY.
3610 */
3611 static void initialize_hps_phy(void)
3612 {
3613 u32 reg;
3614 /*
3615 * Tracking also gets configured here because it's in the
3616 * same register.
3617 */
3618 u32 trk_sample_count = 7500;
3619 u32 trk_long_idle_sample_count = (10 << 16) | 100;
3620 /*
3621 * Format is number of outer loops in the 16 MSB, sample
3622 * count in 16 LSB.
3623 */
3624
3625 reg = 0;
3626 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3627 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3628 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3629 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3630 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3631 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3632 /*
3633 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3634 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3635 */
3636 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3637 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3638 trk_sample_count);
3639 writel(reg, &sdr_ctrl->phy_ctrl0);
3640
3641 reg = 0;
3642 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3643 trk_sample_count >>
3644 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3645 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3646 trk_long_idle_sample_count);
3647 writel(reg, &sdr_ctrl->phy_ctrl1);
3648
3649 reg = 0;
3650 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3651 trk_long_idle_sample_count >>
3652 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
3653 writel(reg, &sdr_ctrl->phy_ctrl2);
3654 }
3655
3656 /**
3657 * initialize_tracking() - Initialize tracking
3658 *
3659 * Initialize the register file with usable initial data.
3660 */
3661 static void initialize_tracking(void)
3662 {
3663 /*
3664 * Initialize the register file with the correct data.
3665 * Compute usable version of value in case we skip full
3666 * computation later.
3667 */
3668 writel(DIV_ROUND_UP(iocfg->delay_per_opa_tap,
3669 iocfg->delay_per_dchain_tap) - 1,
3670 &sdr_reg_file->dtaps_per_ptap);
3671
3672 /* trk_sample_count */
3673 writel(7500, &sdr_reg_file->trk_sample_count);
3674
3675 /* longidle outer loop [15:0] */
3676 writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
3677
3678 /*
3679 * longidle sample count [31:24]
3680 	 * trfc, worst case of 933 MHz 4Gb [23:16]
3681 * trcd, worst case [15:8]
3682 * vfifo wait [7:0]
3683 */
3684 writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3685 &sdr_reg_file->delays);
3686
3687 /* mux delay */
3688 writel((rwcfg->idle << 24) | (rwcfg->activate_1 << 16) |
3689 (rwcfg->sgle_read << 8) | (rwcfg->precharge_all << 0),
3690 &sdr_reg_file->trk_rw_mgr_addr);
3691
3692 writel(rwcfg->mem_if_read_dqs_width,
3693 &sdr_reg_file->trk_read_dqs_width);
3694
3695 /* trefi [7:0] */
3696 writel((rwcfg->refresh_all << 24) | (1000 << 0),
3697 &sdr_reg_file->trk_rfsh);
3698 }
3699
3700 int sdram_calibration_full(void)
3701 {
3702 struct param_type my_param;
3703 struct gbl_type my_gbl;
3704 u32 pass;
3705
3706 memset(&my_param, 0, sizeof(my_param));
3707 memset(&my_gbl, 0, sizeof(my_gbl));
3708
3709 param = &my_param;
3710 gbl = &my_gbl;
3711
3712 rwcfg = socfpga_get_sdram_rwmgr_config();
3713 iocfg = socfpga_get_sdram_io_config();
3714 misccfg = socfpga_get_sdram_misc_config();
3715
3716 /* Set the calibration enabled by default */
3717 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
3718 /*
3719 * Only sweep all groups (regardless of fail state) by default
3720 * Set enabled read test by default.
3721 */
3722 #if DISABLE_GUARANTEED_READ
3723 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3724 #endif
3725 /* Initialize the register file */
3726 initialize_reg_file();
3727
3728 /* Initialize any PHY CSR */
3729 initialize_hps_phy();
3730
3731 scc_mgr_initialize();
3732
3733 initialize_tracking();
3734
3735 printf("%s: Preparing to start memory calibration\n", __FILE__);
3736
3737 debug("%s:%d\n", __func__, __LINE__);
3738 debug_cond(DLEVEL == 1,
3739 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
3740 rwcfg->mem_number_of_ranks, rwcfg->mem_number_of_cs_per_dimm,
3741 rwcfg->mem_dq_per_read_dqs, rwcfg->mem_dq_per_write_dqs,
3742 rwcfg->mem_virtual_groups_per_read_dqs,
3743 rwcfg->mem_virtual_groups_per_write_dqs);
3744 debug_cond(DLEVEL == 1,
3745 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
3746 rwcfg->mem_if_read_dqs_width, rwcfg->mem_if_write_dqs_width,
3747 rwcfg->mem_data_width, rwcfg->mem_data_mask_width,
3748 iocfg->delay_per_opa_tap, iocfg->delay_per_dchain_tap);
3749 debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
3750 iocfg->delay_per_dqs_en_dchain_tap, iocfg->dll_chain_length);
3751 debug_cond(DLEVEL == 1,
3752 "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
3753 iocfg->dqs_en_phase_max, iocfg->dqdqs_out_phase_max,
3754 iocfg->dqs_en_delay_max, iocfg->dqs_in_delay_max);
3755 debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
3756 iocfg->io_in_delay_max, iocfg->io_out1_delay_max,
3757 iocfg->io_out2_delay_max);
3758 debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
3759 iocfg->dqs_in_reserve, iocfg->dqs_out_reserve);
3760
3761 hc_initialize_rom_data();
3762
3763 /* update info for sims */
3764 reg_file_set_stage(CAL_STAGE_NIL);
3765 reg_file_set_group(0);
3766
3767 /*
3768 * Load global needed for those actions that require
3769 * some dynamic calibration support.
3770 */
3771 dyn_calib_steps = STATIC_CALIB_STEPS;
3772 /*
3773 * Load global to allow dynamic selection of delay loop settings
3774 * based on calibration mode.
3775 */
3776 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3777 skip_delay_mask = 0xff;
3778 else
3779 skip_delay_mask = 0x0;
3780
3781 pass = run_mem_calibrate();
3782 debug_mem_calibrate(pass);
3783 return pass;
3784 }