/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier:	BSD-3-Clause
 */
#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>

#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"
static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
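
/*
 * Usage sketch (values assumed for illustration): with skip_delay_mask set
 * to 0xffff (not skipping), SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x32) yields 0x32;
 * with skip_delay_mask set to 0 (skipping), the same expression yields 0.
 */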

struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/* In Hard PHY this is a 2-bit control. */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable, we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
		RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	param->read_correct_mask_vg = (1 << ratio) - 1;
	param->write_correct_mask_vg = (1 << ratio) - 1;
	param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
	param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
	ratio = RW_MGR_MEM_DATA_WIDTH /
		RW_MGR_MEM_DATA_MASK_WIDTH;
	param->dm_correct_mask = (1 << ratio) - 1;
}

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot , Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot , Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
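
/*
 * Worked example (assumed 4-rank configuration, reading from rank 1, for
 * illustration): the tables above give odt_mask_0 = 0x8 and odt_mask_1 = 0xA,
 * so cs_and_odt_mask = (0xFF & ~(1 << 1)) | (0x8 << 8) | (0xA << 16)
 *                    = 0xFD | 0x800 | 0xA0000 = 0xA08FD.
 */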

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
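
/*
 * Note on the addressing above: each group's register is one 32-bit word
 * apart, so the target address is base | off | (grp << 2). For example
 * (offset value assumed for illustration), off = 0x100 and grp = 3 address
 * the word at SDR_PHYGRP_SCCGRP_ADDRESS + 0x10C.
 */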

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in the SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
	const u32 addr	= SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_HHP_GLOBALS_OFFSET |
			  SCC_MGR_HHP_EXTRAS_OFFSET;
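
	/*
	 * With the bit assignments above, 'value' evaluates to
	 * (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0) = 0x27.
	 */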

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 * @write_group:	Write group
 * @out_only:		If non-zero, zero only the output-side configs
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_dqs_out1_delay(new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_oct_out1_delay(write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in ddr3 inst_rom could be
 * applied to other protocols if we wanted to.
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with jump to special shared
	 * RETURN instruction so we set the counter to large value so that
	 * we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* scale (rounding up) to get afi clocks */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;
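
	/*
	 * Worked example (AFI_RATE_RATIO of 2 assumed for illustration):
	 * a request for 250 memory clocks rounds up to
	 * (250 + 2 - 1) / 2 = 125 AFI clocks.
	 */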

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}
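
	/*
	 * Worked example: afi_clocks = 0x12345 falls into the last branch,
	 * so inner = 0xff, outer = 0xff and c_loop = (0x12345 - 1) >> 16 = 1.
	 * Since each loop runs one more time than its counter value, this
	 * covers at least 256 * 256 * 2 = 0x20000 AFI clocks >= 0x12345.
	 */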

	/*
	 * rom instructions are structured as follows:
	 *
	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
	 *                return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
				       SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}

	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/*
		 * USER Use Mirror-ed commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iteration in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instruction in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
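	/*
	 * Checking the arithmetic above: with n = 0, a = 256, b = 106,
	 * ((2 + 0) * 256 + 2) * 106 = 514 * 106 = 54484 cycles, which
	 * indeed covers the ~54000 cycles required for 200us.
	 */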
	rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
				  SEQ_TINIT_CNTR2_VAL,
				  RW_MGR_INIT_RESET_0_CKE_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * transition the RESET to high
	 * Wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iteration in 2 nested loops
	 * it takes the following number of cycles to complete the operation
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instruction in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
	 */
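	/*
	 * Checking the arithmetic above: with n = 2, a = 131, b = 256,
	 * ((2 + 2) * 131 + 2) * 256 = 526 * 256 = 134656 cycles, which
	 * indeed covers the ~134000 cycles required for 500us.
	 */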
	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
				  SEQ_TRESET_CNTR2_VAL,
				  RW_MGR_INIT_RESET_1_CKE_0);

	/* Bring up clock enable. */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
			     0);
}

/*
 * At the end of calibration we have to program the user settings in, and
 * hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * USER need to wait tMOD (12CK or 15ns) time before issuing
	 * other commands, but we will have plenty of NIOS cycles before
	 * actual handoff so it's okay.
	 */
}

/*
 * performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
	uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant bursts of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			writel(RW_MGR_GUARANTEED_READ, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & (~base_rw_mgr));

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
	debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %lu\n",
		   __func__, __LINE__, group, *bit_chk, param->read_correct_mask,
		   (long unsigned int)(*bit_chk == param->read_correct_mask));
	return *bit_chk == param->read_correct_mask;
}
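
/*
 * Worked example of the per-VG accumulation above (widths assumed for
 * illustration): with RW_MGR_MEM_DQ_PER_READ_DQS = 8 and
 * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS = 2, each virtual group contributes
 * 4 pass/fail bits and tmp_bit_chk is shifted left by 4 between groups, so
 * the final value holds all 8 per-bit results for the group.
 */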

/**
 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
 * @rank_bgn:	Rank number
 * @all_ranks:	Test all ranks
 *
 * Load up the patterns we are going to use during a read test.
 */
static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
						    const int all_ranks)
{
	const u32 rank_end = all_ranks ?
			RW_MGR_MEM_NUMBER_OF_RANKS :
			(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	u32 r;

	debug("%s:%d\n", __func__, __LINE__);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant bursts */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}

/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;
	uint32_t quick_read_mode;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %lu",
			   __func__, __LINE__, group,
			   all_groups, *bit_chk, param->read_correct_mask,
			   (long unsigned int)(*bit_chk ==
			   param->read_correct_mask));
		return *bit_chk == param->read_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) => (%u != %lu) => %lu\n",
			   __func__, __LINE__,
			   group, all_groups, *bit_chk, (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0x00));
		return *bit_chk != 0x00;
	}
}

static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}

static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
	(*v)++;
}

static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
{
	uint32_t i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp, v);
}

static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
{
	uint32_t v;
	uint32_t fail_cnt = 0;
	uint32_t test_status;

	for (v = 0; v < VFIFO_SIZE; ) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
			   __func__, __LINE__, v);
		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
			(grp, 1, PASS_ONE_BIT, bit_chk, 0);
		if (!test_status) {
			fail_cnt++;

			if (fail_cnt == 2)
				break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(grp, &v);
	}

	if (v >= VFIFO_SIZE) {
		/* no failing read found!! Something must have gone wrong */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
			   __func__, __LINE__);
		return 0;
	} else {
		return v;
	}
}

static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
			      uint32_t dtaps_per_ptap, uint32_t *work_bgn,
			      uint32_t *v, uint32_t *d, uint32_t *p,
			      uint32_t *i, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay = 0;
	uint32_t test_status;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		*work_bgn = tmp_delay;
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
			for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn +=
				IO_DELAY_PER_OPA_TAP) {
				scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

				test_status =
				rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0);

				if (test_status) {
					*max_working_cnt = 1;
					found_begin = 1;
					break;
				}
			}

			if (found_begin)
				break;

			if (*p > IO_DQS_EN_PHASE_MAX)
				/* fiddle with FIFO */
				rw_mgr_incr_vfifo(*grp, v);
		}

		if (found_begin)
			break;
	}

	if (*i >= VFIFO_SIZE) {
		/* cannot find working solution */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
			   __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}

static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
			     uint32_t *work_bgn, uint32_t *v, uint32_t *d,
			     uint32_t *p, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(*grp, v);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
	     (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			found_begin = 1;
			*work_bgn = tmp_delay;
			break;
		}
	}

	/* We have found a working dtap before the ptap found above */
	if (found_begin == 1)
		(*max_working_cnt)++;

	/*
	 * Restore VFIFO to old state before we decremented it
	 * (if needed).
	 */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
}

static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
				uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				uint32_t *p, uint32_t *i, uint32_t *max_working_cnt,
				uint32_t *work_end)
{
	uint32_t found_end = 0;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* fiddle with FIFO */
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	for (; *i < VFIFO_SIZE + 1; (*i)++) {
		for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
			+= IO_DELAY_PER_OPA_TAP) {
			scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
				found_end = 1;
				break;
			} else {
				(*max_working_cnt)++;
			}
		}

		if (found_end)
			break;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* fiddle with FIFO */
			rw_mgr_incr_vfifo(*grp, v);
			*p = 0;
		}
	}

	if (*i >= VFIFO_SIZE + 1) {
		/* cannot see edge of failing read */
		debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end: failed\n",
			   __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}

static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
				  uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				  uint32_t *p, uint32_t *work_mid,
				  uint32_t *work_end)
{
	int i;
	int tmp_delay = 0;

	*work_mid = (*work_bgn + *work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   *work_bgn, *work_end, *work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
		(*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	while (*work_mid > tmp_delay)
		*work_mid -= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);

	tmp_delay = 0;
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
		(*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	tmp_delay -= IO_DELAY_PER_OPA_TAP;
	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
		;
	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
	scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

	/*
	 * push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
			   *v);
		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0))
			break;

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(*grp, v);
	}

	if (i >= VFIFO_SIZE) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: failed\n",
			   __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}

/* find a good dqs enable to use */
static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
{
	uint32_t v, d, p, i;
	uint32_t max_working_cnt;
	uint32_t bit_chk;
	uint32_t dtaps_per_ptap;
	uint32_t work_bgn, work_mid, work_end;
	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* ************************************************************** */
	/* * Step 0 : Determine number of delay taps for each phase tap * */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	/* ********************************************************* */
	/* * Step 1 : First push vfifo until we get a failing read * */
	v = find_vfifo_read(grp, &bit_chk);

	max_working_cnt = 0;

	/* ******************************************************** */
	/* * step 2: find first working phase, increment in ptaps * */
	if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
			       &p, &i, &max_working_cnt) == 0)
		return 0;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and
	 * we can follow the old procedure otherwise, we've found the beginning,
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/* ********************************************************* */
		/* * step 3a: if we have room, back off by one and
		increment in dtaps * */

		sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				 &max_working_cnt);

		/* ********************************************************* */
		/* * step 4a: go forward from working phase to non working
		phase, increment in ptaps * */
		if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
					 &i, &max_working_cnt, &work_end) == 0)
			return 0;

		/* ********************************************************* */
		/* * step 5a: back off one from last, increment in dtaps * */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp, &v);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		/* * The actual increment of dtaps is done outside of
		the if/else loop to share code */
		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: vfifo=%u ptap=%u\n",
			   __func__, __LINE__, v, p);
	} else {
		/* ******************************************************* */
		/* * step 3-5b: Find the right edge of the window using
		delay taps * */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo=%u ptap=%u dtap=%u bgn=%u\n",
			   __func__, __LINE__, v, p, d, work_bgn);

		work_end = work_bgn;

		/* * The actual increment of dtaps is done outside of the
		if/else loop to share code */

		/* Only here to counterbalance a subtract later on which is
		not needed if this branch of the algorithm is taken */
		max_working_cnt++;
	}

	/* The dtap increment to find the failing edge is done here */
	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: dtap=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							      PASS_ONE_BIT,
							      &bit_chk, 0))
			break;
	}

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, v, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);

	/* *************************************************************** */
	/*
	 * * We need to calculate the number of dtaps that equal a ptap
	 * * To do that we'll back up a ptap and re-find the edge of the
	 * * window using dtaps
	 */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp, &v);
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup cycle/phase: v=%u p=%u\n",
			   __func__, __LINE__, v, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup phase only: v=%u p=%u",
			   __func__, __LINE__, v, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap),
	 * and then a failing read to mark the edge of the window again
	 */

	/* Find a passing read */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
		   __func__, __LINE__);
	found_passing_read = 0;
	found_failing_read = 0;
	initial_failing_dtap = d;
	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     &bit_chk, 0)) {
			found_passing_read = 1;
			break;
		}
	}

	if (found_passing_read) {
		/* Find a failing read */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing read\n",
			   __func__, __LINE__);
		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
				   __func__, __LINE__, d);
			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
				found_failing_read = 1;
				break;
			}
		}
	} else {
		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to calculate dtaps",
			   __func__, __LINE__);
		debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u - %u = %u",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);

	/* ******************************************** */
	/* * step 6: Find the centre of the window * */
	if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				   &work_mid, &work_end) == 0)
		return 0;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: vfifo=%u ptap=%u dtap=%u\n",
		   __func__, __LINE__, v, p - 1, d);
	return 1;
}

/*
 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
 * dq_in_delay values
 */
static uint32_t
rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
{
	uint32_t found;
	uint32_t i;
	uint32_t p;
	uint32_t d;
	uint32_t r;

	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
				    (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
	/* we start at zero, so have one less dq to divide among */

	debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
	      test_bgn);

	/* try different dq_in_delays since the dq path is shorter than dqs */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0;
		     i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_",
				   __func__, __LINE__);
			debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
				   write_group, read_group);
			debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i, p, d);
			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);

	debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq",
		   __func__, __LINE__);
	debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay chain to zero\n",
		   write_group, read_group, found);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++) {
			scc_mgr_set_dq_in_delay(p, 0);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	return found;
}

/* per-bit deskew DQ and center */
static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
	uint32_t use_read_test, uint32_t update_fom)
{
	uint32_t i, p, d, min_index;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t mid;
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
		final_dqs_en;
	int32_t dq_margin, dqs_margin;
	uint32_t stop;
	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
	uint32_t addr;

	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
	start_dqs = readl(addr + (read_group << 2));
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		start_dqs_en = readl(addr + ((read_group << 2)
				     - IO_DQS_EN_DELAY_OFFSET));

	/* set the left and right edge of each bit to an illegal value */
	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_in_delay(test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u && %u",
			   __func__, __LINE__, d, sticky_bit_chk,
			   param->read_correct_mask, stop);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as the
					left_edge */
					left_edge[i] = d;
				} else {
					/* If a left edge has not been seen yet,
					then a future passing test will mark
					this edge as the right edge */
					if (left_edge[i] ==
						IO_IO_IN_DELAY_MAX + 1)
						right_edge[i] = -(d + 1);
				}
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
			right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have seen
		 * both the left and right edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1))
			sticky_bit_chk = sticky_bit_chk | 1;

		if (i == 0)
			break;
	}

	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
			uint32_t delay = d + start_dqs_en;
			if (delay > IO_DQS_EN_DELAY_MAX)
				delay = IO_DQS_EN_DELAY_MAX;
			scc_mgr_set_dqs_en_delay(read_group, delay);
		}
		scc_mgr_load_dqs(read_group);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);

		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == %u && %u",
			   __func__, __LINE__, d,
			   sticky_bit_chk, param->read_correct_mask, stop);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as
					the right_edge */
					right_edge[i] = d;
				} else {
					if (d != 0) {
						/* If a right edge has not been
						seen yet, then a future passing
						test will mark this edge as the
						left edge */
						if (right_edge[i] ==
						IO_IO_IN_DELAY_MAX + 1)
							left_edge[i] = -(d + 1);
					} else {
						/* d = 0 failed, but it passed
						when testing the left edge,
						so it must be marginal,
						set it to -1 */
						if (right_edge[i] ==
							IO_IO_IN_DELAY_MAX + 1 &&
							left_edge[i] !=
							IO_IO_IN_DELAY_MAX
							+ 1)
							right_edge[i] = -1;
						/* If a right edge has not been
						seen yet, then a future passing
						test will mark this edge as the
						left edge */
						else if (right_edge[i] ==
							IO_IO_IN_DELAY_MAX +
							1)
							left_edge[i] = -(d + 1);
					}
				}

				debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,d=%u]: ",
					   __func__, __LINE__, d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d",
			   __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
			== IO_IO_IN_DELAY_MAX + 1)) {
			/*
			 * Restore delay chain settings before letting the loop
			 * in rw_mgr_mem_calibrate_vfifo to retry different
			 * dqs/ck relationships.
			 */
			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
				scc_mgr_set_dqs_en_delay(read_group,
							 start_dqs_en);
			}
			scc_mgr_load_dqs(read_group);
			writel(0, &sdr_scc_mgr->update);

			debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
				   __func__, __LINE__,
				   i, left_edge[i], right_edge[i]);
			if (use_read_test) {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO,
					CAL_SUBSTAGE_VFIFO_CENTER);
			} else {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
			}
			return 0;
		}
	}

	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;

	mid_min = mid_min / 2;
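
	/*
	 * Worked example: if the smallest window has
	 * left_edge - right_edge = 5, the bias above adds 1 and mid_min
	 * becomes 6 / 2 = 3, rounding the DQS shift toward the right edge.
	 */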

	debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
		   __func__, __LINE__, mid_min, min_index);

	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs - mid_min;
	if (new_dqs > IO_DQS_IN_DELAY_MAX)
		new_dqs = IO_DQS_IN_DELAY_MAX;
	else if (new_dqs < 0)
		new_dqs = 0;

	mid_min = start_dqs - new_dqs;
	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
		   mid_min, new_dqs);

	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
		else if (start_dqs_en - mid_min < 0)
			mid_min += start_dqs_en - mid_min;
	}
	new_dqs = start_dqs - mid_min;

	debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
		   start_dqs,
		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
		   new_dqs, mid_min);

	/* Initialize data for export structures */
	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
	dq_margin  = IO_IO_IN_DELAY_MAX + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);
2135 debug_cond(DLEVEL
== 2, "vfifo_center: before: \
2136 shift_dq[%u]=%d\n", i
, shift_dq
);
2138 addr
= SDR_PHYGRP_SCCGRP_ADDRESS
| SCC_MGR_IO_IN_DELAY_OFFSET
;
2139 temp_dq_in_delay1
= readl(addr
+ (p
<< 2));
2140 temp_dq_in_delay2
= readl(addr
+ (i
<< 2));
2142 if (shift_dq
+ (int32_t)temp_dq_in_delay1
>
2143 (int32_t)IO_IO_IN_DELAY_MAX
) {
2144 shift_dq
= (int32_t)IO_IO_IN_DELAY_MAX
- temp_dq_in_delay2
;
2145 } else if (shift_dq
+ (int32_t)temp_dq_in_delay1
< 0) {
2146 shift_dq
= -(int32_t)temp_dq_in_delay1
;
2148 debug_cond(DLEVEL
== 2, "vfifo_center: after: \
2149 shift_dq[%u]=%d\n", i
, shift_dq
);
2150 final_dq
[i
] = temp_dq_in_delay1
+ shift_dq
;
2151 scc_mgr_set_dq_in_delay(p
, final_dq
[i
]);
2154 debug_cond(DLEVEL
== 2, "vfifo_center: margin[%u]=[%d,%d]\n", i
,
2155 left_edge
[i
] - shift_dq
+ (-mid_min
),
2156 right_edge
[i
] + shift_dq
- (-mid_min
));
2157 /* To determine values for export structures */
2158 if (left_edge
[i
] - shift_dq
+ (-mid_min
) < dq_margin
)
2159 dq_margin
= left_edge
[i
] - shift_dq
+ (-mid_min
);
2161 if (right_edge
[i
] + shift_dq
- (-mid_min
) < dqs_margin
)
2162 dqs_margin
= right_edge
[i
] + shift_dq
- (-mid_min
);
2165 final_dqs
= new_dqs
;
2166 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS
)
2167 final_dqs_en
= start_dqs_en
- mid_min
;
2170 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS
) {
2171 scc_mgr_set_dqs_en_delay(read_group
, final_dqs_en
);
2172 scc_mgr_load_dqs(read_group
);
2176 scc_mgr_set_dqs_bus_in_delay(read_group
, final_dqs
);
2177 scc_mgr_load_dqs(read_group
);
2178 debug_cond(DLEVEL
== 2, "%s:%d vfifo_center: dq_margin=%d \
2179 dqs_margin=%d", __func__
, __LINE__
,
2180 dq_margin
, dqs_margin
);
2183 * Do not remove this line as it makes sure all of our decisions
2184 * have been applied. Apply the update bit.
2186 writel(0, &sdr_scc_mgr
->update
);
2188 return (dq_margin
>= 0) && (dqs_margin
>= 0);
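
/*
 * Worked example of the centering math above, with purely illustrative
 * numbers (not from any particular board): if the narrowest window
 * (min_index) has left_edge = 6 and right_edge = 6 (mid = 0, so mid_min
 * stays 0 and DQS is not moved), and bit i has left_edge = 10 and
 * right_edge = 4, then shift_dq = (10 - 4 - (6 - 6))/2 = 3. Adding three
 * input-delay taps to bit i moves its window to left = 7, right = 7, so
 * all DQ window centres line up before DQS itself is shifted by -mid_min.
 */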
/**
 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
 * @rw_group:	Read/Write Group
 * @phase:	DQ/DQS phase
 *
 * Because initially no communication can be reliably performed with the memory
 * device, the sequencer uses a guaranteed write mechanism to write data into
 * the memory device.
 */
static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
						 const u32 phase)
{
	uint32_t bit_chk;
	int ret;

	/* Set a particular DQ/DQS phase. */
	scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);

	debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
		   __func__, __LINE__, rw_group, phase);

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
	 * Load up the patterns used by read calibration using the
	 * current DQDQS phase.
	 */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);

	if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
		return 0;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
	 * Back-to-Back reads of the patterns used for calibration.
	 */
	ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1,
						      &bit_chk, 1);
	if (!ret) {	/* FIXME: 0 means failure in this old code :-( */
		debug_cond(DLEVEL == 1,
			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
			   __func__, __LINE__, rw_group, phase);
		return -EIO;
	}

	return 0;
}
/**
 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * DQS enable calibration ensures reliable capture of the DQ signal without
 * glitches on the DQS line.
 */
static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
						       const u32 test_bgn)
{
	int ret;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
	 * DQS and DQS Enable Signal Relationships.
	 */
	ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay(
			rw_group, rw_group, test_bgn);
	if (!ret)	/* FIXME: 0 means failure in this old code :-( */
		return -EIO;

	return 0;
}
/**
 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
 * @rw_group:		Read/Write Group
 * @test_bgn:		Rank at which the test begins
 * @use_read_test:	Perform a read test
 * @update_fom:		Update FOM
 *
 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
 * within a group.
 */
static int
rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
				      const int use_read_test,
				      const int update_fom)
{
	int ret, grp_calibrated;
	u32 rank_bgn, sr;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
	 * Read per-bit deskew can be done on a per shadow register basis.
	 */
	grp_calibrated = 1;
	for (rank_bgn = 0, sr = 0;
	     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
		/* Check if this set of ranks should be skipped entirely. */
		if (param->skip_shadow_regs[sr])
			continue;

		ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
							rw_group, test_bgn,
							use_read_test,
							update_fom);
		if (ret)
			continue;

		grp_calibrated = 0;
	}

	if (!grp_calibrated)
		return -EIO;

	return 0;
}
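
/*
 * Example of the shadow-register walk above (illustrative configuration):
 * with RW_MGR_MEM_NUMBER_OF_RANKS = 4 and NUM_RANKS_PER_SHADOW_REG = 2,
 * vfifo centering runs once for sr = 0 (ranks 0..1, rank_bgn = 0) and once
 * for sr = 1 (ranks 2..3, rank_bgn = 2), unless a set is flagged in
 * param->skip_shadow_regs.
 */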
/**
 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * Stage 1: Calibrate the read valid prediction FIFO.
 *
 * This function implements UniPHY calibration Stage 1, as explained in
 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
 *
 * - read valid prediction will consist of finding:
 *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
 *   - DQS input phase and DQS input delay (DQ/DQS Centering)
 * - we also do a per-bit deskew on the DQ lines.
 */
static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
{
	uint32_t p, d;
	uint32_t dtaps_per_ptap;
	uint32_t failed_substage;

	int ret;

	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);

	/* Update info for sims */
	reg_file_set_group(rw_group);
	reg_file_set_stage(CAL_STAGE_VFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);

	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;

	/* USER Determine number of delay taps for each phase tap. */
	dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
				      IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;

	for (d = 0; d <= dtaps_per_ptap; d += 2) {
		/*
		 * In RLDRAMX we may be messing the delay of pins in
		 * the same write rw_group but outside of the current read
		 * rw_group, but that's ok because we haven't calibrated
		 * the output side yet.
		 */
		if (d > 0) {
			scc_mgr_apply_group_all_out_delay_add_all_ranks(
								rw_group, d);
		}

		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
			/* 1) Guaranteed Write */
			ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
			if (ret)
				break;

			/* 2) DQS Enable Calibration */
			ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
									  test_bgn);
			if (ret) {
				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
				continue;
			}

			/* 3) Centering DQ/DQS */
			/*
			 * If doing read after write calibration, do not update
			 * FOM now. Do it then.
			 */
			ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
								test_bgn, 1, 0);
			if (ret) {
				failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
				continue;
			}

			/* All done. */
			goto cal_done_ok;
		}
	}

	/* Calibration Stage 1 failed. */
	set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
	return 0;

	/* Calibration Stage 1 completed OK. */
cal_done_ok:
	/*
	 * Reset the delay chains back to zero if they have moved > 1
	 * (check for > 1 because loop will increase d even when pass in
	 * first case).
	 */
	if (d > 2)
		scc_mgr_zero_group(rw_group, 1);

	return 1;
}
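
/*
 * Illustrative sweep size for the stage above, using made-up tap delays:
 * with IO_DELAY_PER_OPA_TAP = 416 ps and IO_DELAY_PER_DQS_EN_DCHAIN_TAP =
 * 25 ps, dtaps_per_ptap = DIV_ROUND_UP(416, 25) - 1 = 16, so d takes the
 * values 0, 2, ..., 16 and each d is combined with every DQDQS phase p
 * until one (d, p) pair passes all three sub-steps.
 */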
/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
					       uint32_t test_bgn)
{
	uint32_t rank_bgn, sr;
	uint32_t grp_calibrated;
	uint32_t write_group;

	debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);

	/* update info for sims */

	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	write_group = read_group;

	/* update info for sims */
	reg_file_set_group(read_group);

	grp_calibrated = 1;
	/* Read per-bit deskew can be done on a per shadow register basis */
	for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
		/* Determine if this set of ranks should be skipped entirely */
		if (!param->skip_shadow_regs[sr]) {
			/* This is the last calibration round, update FOM here */
			if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
								write_group,
								read_group,
								test_bgn, 0,
								1))
				grp_calibrated = 0;
		}
	}

	if (grp_calibrated == 0) {
		set_failing_group_stage(write_group,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
		return 0;
	}

	return 1;
}
/* Calibrate LFIFO to find smallest read latency */
static uint32_t rw_mgr_mem_calibrate_lfifo(void)
{
	uint32_t found_one;
	uint32_t bit_chk;

	debug("%s:%d\n", __func__, __LINE__);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_LFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);

	/* Load up the patterns used by read calibration for all ranks */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
	found_one = 0;

	do {
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
			   __func__, __LINE__, gbl->curr_read_lat);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
							      NUM_READ_TESTS,
							      PASS_ALL_BITS,
							      &bit_chk, 1))
			break;

		found_one = 1;
		/* reduce read latency and see if things are working */
		/* correctly */
		gbl->curr_read_lat--;
	} while (gbl->curr_read_lat > 0);

	/* reset the fifos to get pointers to known state */

	writel(0, &phy_mgr_cmd->fifo_reset);

	if (found_one) {
		/* add a fudge factor to the read latency that was determined */
		gbl->curr_read_lat += 2;
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using read_lat=%u\n",
			   __func__, __LINE__, gbl->curr_read_lat);
		return 1;
	} else {
		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
					CAL_SUBSTAGE_READ_LATENCY);

		debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial read_lat=%u\n",
			   __func__, __LINE__, gbl->curr_read_lat);
		return 0;
	}
}
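
/*
 * Illustrative run of the LFIFO search above (made-up numbers): starting
 * from curr_read_lat = 24, the read test passes at 24, 23, ..., 8 and
 * first fails at 7; the loop exits with curr_read_lat = 7 and the +2 fudge
 * factor programs a final read latency of 9 into phy_rlat.
 */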
/*
 * issue write test command.
 * two variants are provided. one that just tests a write pattern and
 * another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
						  uint32_t test_dm)
{
	uint32_t mcc_instruction;
	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
		ENABLE_SUPER_QUICK_CALIBRATION);
	uint32_t rw_wl_nop_cycles;
	uint32_t addr;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping
	 *    the NOP micro-instruction all together
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need
	 *    to directly jump to the micro-instruction that sends out the data
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations.
	 *       one counter left to issue this command in "multiple-group" mode
	 */

	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;

	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		}
	} else {
		/*
		 * CNTR 2 - In this case we want to execute the next instruction
		 * and NOT take the jump. So we set the counter to 0. The jump
		 * address doesn't count.
		 */
		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		/*
		 * CNTR 3 - Set the nop counter to the number of cycles we
		 * need to loop for, minus 1.
		 */
		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	}

	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		  RW_MGR_RESET_READ_DATAPATH_OFFSET);

	if (quick_write_mode)
		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
	else
		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);

	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);

	/*
	 * CNTR 1 - This is used to ensure enough time elapses
	 * for read data to come back.
	 */
	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);

	if (test_dm) {
		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	} else {
		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(mcc_instruction, addr + (group << 2));
}
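
/*
 * Illustrative counter setup for case 1 above, assuming
 * gbl->rw_wl_nop_cycles = 4: CNTR 2 and jump address 2 are cleared so that
 * jump is never taken, and CNTR 3 is loaded with 4 - 1 = 3 so the RW
 * manager loops the NOP micro-instruction at jump address 3 for the
 * requested number of cycles before the write data goes out.
 */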
/* Test writes, can check for a single bit pass or multiple bit pass */
static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
	uint32_t *bit_chk, uint32_t all_ranks)
{
	uint32_t r;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t vg;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr_rw_mgr;
	uint32_t base_rw_mgr;

	*bit_chk = param->write_correct_mask;
	correct_mask_vg = param->write_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		tmp_bit_chk = 0;
		addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);

			tmp_bit_chk = tmp_bit_chk <<
				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
			rw_mgr_mem_calibrate_write_test_issue(write_group *
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
				use_dm);

			base_rw_mgr = readl(addr_rw_mgr);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == %u => %lu",
			   write_group, use_dm,
			   *bit_chk, param->write_correct_mask,
			   (long unsigned int)(*bit_chk ==
			   param->write_correct_mask));
		return *bit_chk == param->write_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
			   write_group, use_dm, *bit_chk);
		debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0));
		return *bit_chk != 0x00;
	}
}
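
/*
 * Illustrative bit_chk accumulation for the test above: with
 * RW_MGR_MEM_DQ_PER_WRITE_DQS = 8 and 2 virtual groups per write DQS, each
 * pass of the vg loop shifts tmp_bit_chk left by 8 / 2 = 4 and ORs in four
 * new pass bits, so after both virtual groups tmp_bit_chk holds one
 * pass/fail bit per DQ; *bit_chk then ANDs the per-rank results together
 * across all tested ranks.
 */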
/*
 * center all windows. do per-bit-deskew to possibly increase size of
 * certain windows.
 */
static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t test_bgn)
{
	uint32_t i, p, min_index;
	int32_t d;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
	int32_t mid;
	int32_t mid_min, orig_mid_min;
	int32_t new_dqs, start_dqs, shift_dq;
	int32_t dq_margin, dqs_margin, dm_margin;
	uint32_t stop;
	uint32_t temp_dq_out1_delay;
	uint32_t addr;

	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);

	dm_margin = 0;

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
	start_dqs = readl(addr +
			  (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));

	/* per-bit deskew */

	/*
	 * set the left and right edge of each bit to an illegal value
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
	 */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		left_edge[i]  = IO_IO_OUT1_DELAY_MAX + 1;
		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_out1_delay(write_group, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
			0, PASS_ONE_BIT, &bit_chk, 0);
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->write_correct_mask);
		debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u == %u && %u [bit_chk= %u ]\n",
			   d, sticky_bit_chk, param->write_correct_mask,
			   stop, bit_chk);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as the
					 * left_edge.
					 */
					left_edge[i] = d;
				} else {
					/*
					 * If a left edge has not been seen
					 * yet, then a future passing test will
					 * mark this edge as the right edge.
					 */
					if (left_edge[i] ==
						IO_IO_OUT1_DELAY_MAX + 1) {
						right_edge[i] = -(d + 1);
					}
				}
				debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_out1_delay(0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2, "%s:%d write_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have
		 * seen the left edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
			sticky_bit_chk = sticky_bit_chk | 1;

		if (i == 0)
			break;
	}

	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + start_dqs);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
			0, PASS_ONE_BIT, &bit_chk, 0);

		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->write_correct_mask);

		debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == %u && %u\n",
			   d, sticky_bit_chk,
			   param->write_correct_mask, stop);

		if (stop == 1) {
			if (d == 0) {
				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
					i++) {
					/* d = 0 failed, but it passed when
					testing the left edge, so it must be
					marginal, set it to -1 */
					if (right_edge[i] ==
						IO_IO_OUT1_DELAY_MAX + 1 &&
						left_edge[i] !=
						IO_IO_OUT1_DELAY_MAX + 1) {
						right_edge[i] = -1;
					}
				}
			}
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as
					 * the right_edge.
					 */
					right_edge[i] = d;
				} else {
					if (d != 0) {
						/*
						 * If a right edge has not
						 * been seen yet, then a future
						 * passing test will mark this
						 * edge as the left edge.
						 */
						if (right_edge[i] ==
						    IO_IO_OUT1_DELAY_MAX + 1)
							left_edge[i] = -(d + 1);
					} else {
						/*
						 * d = 0 failed, but it passed
						 * when testing the left edge,
						 * so it must be marginal, set
						 * it to -1.
						 */
						if (right_edge[i] ==
						    IO_IO_OUT1_DELAY_MAX + 1 &&
						    left_edge[i] !=
						    IO_IO_OUT1_DELAY_MAX + 1)
							right_edge[i] = -1;
						/*
						 * If a right edge has not been
						 * seen yet, then a future
						 * passing test will mark this
						 * edge as the left edge.
						 */
						else if (right_edge[i] ==
							IO_IO_OUT1_DELAY_MAX +
							1)
							left_edge[i] = -(d + 1);
					}
				}
				debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
			   __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);
		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
		    (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
			set_failing_group_stage(test_bgn + i,
						CAL_STAGE_WRITES,
						CAL_SUBSTAGE_WRITES_CENTER);
			return 0;
		}
	}

	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;
	mid_min = mid_min / 2;
	debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
		   __LINE__, mid_min);

	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs;
	mid_min = 0;
	debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
		   __func__, __LINE__, start_dqs, new_dqs, mid_min);
	/* Initialize data for export structures */
	dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
	dq_margin  = IO_IO_OUT1_DELAY_MAX + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq[%u]=%d\n",
			   __func__, __LINE__, i, shift_dq);

		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
		temp_dq_out1_delay = readl(addr + (i << 2));
		if (shift_dq + (int32_t)temp_dq_out1_delay >
			(int32_t)IO_IO_OUT1_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
		} else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
			shift_dq = -(int32_t)temp_dq_out1_delay;
		}
		debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
			   i, shift_dq);
		scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
		scc_mgr_load_dq(i);

		debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));
		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
			dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}

	/* Move DQS */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
	writel(0, &sdr_scc_mgr->update);

	/* Centre DM */
	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);

	/*
	 * set the left and right edge of each bit to an illegal value,
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
	 */
	left_edge[0]  = IO_IO_OUT1_DELAY_MAX + 1;
	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t win_best = 0;

	/* Search for the/part of the window with DM shift */
	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
		scc_mgr_apply_group_dm_out1_delay(d);
		writel(0, &sdr_scc_mgr->update);

		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* USE Set current end of the window */
			end_curr = -d;
			/*
			 * If a starting edge of our window has not been seen
			 * this is our current start of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = -d;

			/*
			 * If current window is bigger than best seen.
			 * Set best seen to be current window.
			 */
			if ((end_curr-bgn_curr+1) > win_best) {
				win_best = end_curr-bgn_curr+1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
		}
	}

	/* Reset DM delay chains to 0 */
	scc_mgr_apply_group_dm_out1_delay(0);

	/*
	 * Check to see if the current window nudges up against 0 delay.
	 * If so we need to continue the search by shifting DQS otherwise DQS
	 * search begins as a new search.
	 */
	if (end_curr != 0) {
		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	}

	/* Search for the/part of the window with DQS shifts */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
		/*
		 * Note: This only shifts DQS, so are we limiting ourselves to
		 * width of DQ unnecessarily.
		 */
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + new_dqs);

		writel(0, &sdr_scc_mgr->update);
		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* USE Set current end of the window */
			end_curr = d;
			/*
			 * If a beginning edge of our window has not been seen
			 * this is our current begin of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = d;

			/*
			 * If current window is bigger than best seen. Set best
			 * seen to be current window.
			 */
			if ((end_curr-bgn_curr+1) > win_best) {
				win_best = end_curr-bgn_curr+1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;

			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than already seen largest
			 * window we can exit.
			 */
			if ((win_best-1) >
				(IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
				break;
			}
		}
	}

	/* assign left and right edge for cal and reporting; */
	left_edge[0] = -1*bgn_best;
	right_edge[0] = end_best;

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
		   __LINE__, left_edge[0], right_edge[0]);

	/* Move DQS (back to orig) */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);

	/* Move DM */

	/* Find middle of window for the DM bit */
	mid = (left_edge[0] - right_edge[0]) / 2;

	/* only move right, since we are not moving DQS/DQ */
	if (mid < 0)
		mid = 0;

	/* dm_margin should fail if we never find a window */
	if (win_best == 0)
		dm_margin = -1;
	else
		dm_margin = left_edge[0] - mid;

	scc_mgr_apply_group_dm_out1_delay(mid);
	writel(0, &sdr_scc_mgr->update);

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
		   __func__, __LINE__, left_edge[0],
		   right_edge[0], mid, dm_margin);

	gbl->fom_out += dq_margin + dqs_margin;

	debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
		   __func__, __LINE__,
		   dq_margin, dqs_margin, dm_margin);

	/*
	 * Do not remove this line as it makes sure all of our
	 * decisions have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
}
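
/*
 * Illustrative DM window for the two sweeps above (made-up numbers): if
 * the DM-shift sweep passes for d = 2..0 (bgn_curr = -2, end_curr = 0),
 * the window touches zero delay, so the DQS-shift sweep continues it
 * through d = 0..4; then bgn_best = -2 and end_best = 4, giving
 * left_edge[0] = 2 and right_edge[0] = 4. mid = (2 - 4) / 2 is negative
 * and is clamped to 0, so the DM delay stays put and
 * dm_margin = left_edge[0] - 0 = 2.
 */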
/* calibrate the write operations */
static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
					    uint32_t test_bgn)
{
	/* update info for sims */
	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);

	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	reg_file_set_group(g);

	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
		set_failing_group_stage(g, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return 0;
	}

	return 1;
}
/**
 * mem_precharge_and_activate() - Precharge all banks and activate
 *
 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
 */
static void mem_precharge_and_activate(void)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		/* Test if the rank should be skipped. */
		if (param->skip_ranks[r])
			continue;

		/* Set rank. */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* Precharge all banks. */
		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* Activate rows. */
		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}
/**
 * mem_init_latency() - Configure memory RLAT and WLAT settings
 *
 * Configure memory RLAT and WLAT parameters.
 */
static void mem_init_latency(void)
{
	/*
	 * For AV/CV, LFIFO is hardened and always runs at full rate
	 * so max latency in AFI clocks, used here, is correspondingly
	 * smaller.
	 */
	const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
	u32 rlat, wlat;

	debug("%s:%d\n", __func__, __LINE__);

	/*
	 * Read in write latency.
	 * WL for Hard PHY does not include additive latency.
	 */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	gbl->rw_wl_nop_cycles = wlat - 1;

	/* Read in read latency. */
	rlat = readl(&data_mgr->t_rl_add);

	/* Set a pretty high read latency initially. */
	gbl->curr_read_lat = rlat + 16;
	if (gbl->curr_read_lat > max_latency)
		gbl->curr_read_lat = max_latency;

	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* Advertise write latency. */
	writel(wlat, &phy_mgr_cfg->afi_wlat);
}
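
/*
 * Example latency derivation for the function above, using illustrative
 * register values: with t_wl_add = 5 and mem_t_add = 2, wlat = 7 and
 * gbl->rw_wl_nop_cycles = 6; with t_rl_add = 7 the initial read latency
 * is 7 + 16 = 23, clamped to max_latency if MAX_LATENCY_COUNT_WIDTH makes
 * that smaller.
 */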
/**
 * @mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
 *
 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
 */
static void mem_skip_calibrate(void)
{
	uint32_t vfifo_offset;
	uint32_t i, j, r;

	debug("%s:%d\n", __func__, __LINE__);
	/* Need to update every shadow register set used by the interface */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
		 */
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			scc_mgr_set_dqs_en_phase(i, 0);
#if IO_DLL_CHAIN_LENGTH == 6
			scc_mgr_set_dqdqs_output_phase(i, 6);
#else
			scc_mgr_set_dqdqs_output_phase(i, 7);
#endif
			/*
			 * Case:33398
			 *
			 * Write data arrives to the I/O two cycles before write
			 * latency is reached (720 deg).
			 *   -> due to bit-slip in a/c bus
			 *   -> to allow board skew where dqs is longer than ck
			 *      -> how often can this happen!?
			 *      -> can claim back some ptaps for high freq
			 *         support if we can relax this, but i digress...
			 *
			 * The write_clk leads mem_ck by 90 deg
			 * The minimum ptap of the OPA is 180 deg
			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
			 * The write_clk is always delayed by 2 ptaps
			 *
			 * Hence, to make DQS aligned to CK, we need to delay
			 * DQS by:
			 *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
			 *
			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
			 * gives us the number of ptaps, which simplifies to:
			 *
			 *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
			 */
			scc_mgr_set_dqdqs_output_phase(i,
					1.25 * IO_DLL_CHAIN_LENGTH - 2);
		}
		writel(0xff, &sdr_scc_mgr->dqs_ena);
		writel(0xff, &sdr_scc_mgr->dqs_io_ena);

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
				  SCC_MGR_GROUP_COUNTER_OFFSET);

			writel(0xff, &sdr_scc_mgr->dq_ena);
			writel(0xff, &sdr_scc_mgr->dm_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}

	/* Compensate for simulation model behaviour */
	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		scc_mgr_set_dqs_bus_in_delay(i, 10);
		scc_mgr_load_dqs(i);
	}
	writel(0, &sdr_scc_mgr->update);

	/*
	 * ArriaV has hard FIFOs that can only be initialized by incrementing
	 * in sequencer.
	 */
	vfifo_offset = CALIB_VFIFO_OFFSET;
	for (j = 0; j < vfifo_offset; j++)
		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
	 * setting from generation-time constant.
	 */
	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
}
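
/*
 * Sanity check of the ptap formula above for IO_DLL_CHAIN_LENGTH = 8:
 * 720 - 90 - 180 - 2 * (360 / 8) = 360 degrees of required DQS delay, and
 * 360 / (360 / 8) = 8 ptaps, which matches 1.25 * 8 - 2 = 8.
 */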
/**
 * mem_calibrate() - Memory calibration entry point.
 *
 * Perform memory calibration.
 */
static uint32_t mem_calibrate(void)
{
	uint32_t i;
	uint32_t rank_bgn, sr;
	uint32_t write_group, write_test_bgn;
	uint32_t read_group, read_test_bgn;
	uint32_t run_groups, current_run;
	uint32_t failing_groups = 0;
	uint32_t group_failed = 0;

	const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
				RW_MGR_MEM_IF_WRITE_DQS_WIDTH;

	debug("%s:%d\n", __func__, __LINE__);

	/* Initialize the data settings */
	gbl->error_substage = CAL_SUBSTAGE_NIL;
	gbl->error_stage = CAL_STAGE_NIL;
	gbl->error_group = 0xff;
	gbl->fom_in = 0;
	gbl->fom_out = 0;

	/* Initialize WLAT and RLAT. */
	mem_init_latency();

	/* Initialize bit slips. */
	mem_precharge_and_activate();

	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_GROUP_COUNTER_OFFSET);
		/* Only needed once to set all groups, pins, DQ, DQS, DM. */
		if (i == 0)
			scc_mgr_set_hhp_extras();

		scc_set_bypass_mode(i);
	}

	/* Calibration is skipped. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
		/*
		 * Set VFIFO and LFIFO to instant-on settings in skip
		 * calibration mode.
		 */
		mem_skip_calibrate();

		/*
		 * Do not remove this line as it makes sure all of our
		 * decisions have been applied.
		 */
		writel(0, &sdr_scc_mgr->update);
		return 1;
	}

	/* Calibration is not skipped. */
	for (i = 0; i < NUM_CALIB_REPEAT; i++) {
		/*
		 * Zero all delay chain/phase settings for all
		 * groups and all shadow register sets.
		 */
		scc_mgr_zero_all();

		run_groups = ~param->skip_groups;

		for (write_group = 0, write_test_bgn = 0; write_group
			< RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
			write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {

			/* Initialize the group failure */
			group_failed = 0;

			current_run = run_groups & ((1 <<
					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
			run_groups = run_groups >>
					RW_MGR_NUM_DQS_PER_WRITE_GROUP;

			if (current_run == 0)
				continue;

			writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
					    SCC_MGR_GROUP_COUNTER_OFFSET);
			scc_mgr_zero_group(write_group, 0);

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
					continue;

				/* Calibrate the VFIFO */
				if (rw_mgr_mem_calibrate_vfifo(read_group,
							       read_test_bgn))
					continue;

				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* Calibrate the output side */
			for (rank_bgn = 0, sr = 0;
			     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
			     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				/* Not needed in quick mode! */
				if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
					continue;

				/*
				 * Determine if this set of ranks
				 * should be skipped entirely.
				 */
				if (param->skip_shadow_regs[sr])
					continue;

				/* Calibrate WRITEs */
				if (rw_mgr_mem_calibrate_writes(rank_bgn,
						write_group, write_test_bgn))
					continue;

				group_failed = 1;
				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;
			}

			/* Some group failed, we're done. */
			if (group_failed)
				goto grp_failed;

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				if (rw_mgr_mem_calibrate_vfifo_end(read_group,
								read_test_bgn))
					continue;

				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* No group failed, continue as usual. */
			continue;

grp_failed:		/* A group failed, increment the counter. */
			failing_groups++;
		}

		/*
		 * USER If there are any failing groups then report
		 * the failure.
		 */
		if (failing_groups != 0)
			return 0;

		if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
			continue;

		/*
		 * If we're skipping groups as part of debug,
		 * don't calibrate LFIFO.
		 */
		if (param->skip_groups != 0)
			continue;

		/* Calibrate the LFIFO */
		if (!rw_mgr_mem_calibrate_lfifo())
			return 0;
	}

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return 1;
}
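
/*
 * Illustrative group mapping for the loops above: with
 * RW_MGR_MEM_IF_READ_DQS_WIDTH = 8 and RW_MGR_MEM_IF_WRITE_DQS_WIDTH = 4,
 * rwdqs_ratio = 2, so write group w covers read groups 2w and 2w + 1. Each
 * write group first runs VFIFO calibration on both of its read groups,
 * then write calibration per shadow register set, then the post-write
 * VFIFO pass.
 */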
/**
 * run_mem_calibrate() - Perform memory calibration
 *
 * This function triggers the entire memory calibration procedure.
 */
static int run_mem_calibrate(void)
{
	int pass;

	debug("%s:%d\n", __func__, __LINE__);

	/* Reset pass/fail status shown on afi_cal_success/fail */
	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);

	/* Stop tracking manager. */
	clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);

	phy_mgr_initialize();
	rw_mgr_mem_initialize();

	/* Perform the actual memory calibration. */
	pass = mem_calibrate();

	mem_precharge_and_activate();
	writel(0, &phy_mgr_cmd->fifo_reset);

	/* Handoff. */
	rw_mgr_mem_handoff();
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x2, &phy_mgr_cfg->mux_sel);

	/* Start tracking manager. */
	setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);

	return pass;
}
/**
 * debug_mem_calibrate() - Report result of memory calibration
 * @pass:	Value indicating whether calibration passed or failed
 *
 * This function reports the results of the memory calibration
 * and writes debug information into the register file.
 */
static void debug_mem_calibrate(int pass)
{
	uint32_t debug_info;

	if (pass) {
		printf("%s: CALIBRATION PASSED\n", __FILE__);

		gbl->fom_in /= 2;
		gbl->fom_out /= 2;

		if (gbl->fom_in > 0xff)
			gbl->fom_in = 0xff;

		if (gbl->fom_out > 0xff)
			gbl->fom_out = 0xff;

		/* Update the FOM in the register file */
		debug_info = gbl->fom_in;
		debug_info |= gbl->fom_out << 8;
		writel(debug_info, &sdr_reg_file->fom);

		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
	} else {
		printf("%s: CALIBRATION FAILED\n", __FILE__);

		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);

		/* Update the failing group/stage in the register file */
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;
		writel(debug_info, &sdr_reg_file->failing_stage);
	}

	printf("%s: Calibration complete\n", __FILE__);
}
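
/*
 * Example of the FOM packing above: fom_in = 0x30 and fom_out = 0x40
 * (after halving and clamping) yield debug_info = 0x30 | (0x40 << 8)
 * = 0x4030 in the register file's fom field.
 */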
/**
 * hc_initialize_rom_data() - Initialize ROM data
 *
 * Initialize ROM data.
 */
static void hc_initialize_rom_data(void)
{
	u32 i, addr;

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
		writel(inst_rom_init[i], addr + (i << 2));

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
		writel(ac_rom_init[i], addr + (i << 2));
}
/**
 * initialize_reg_file() - Initialize SDR register file
 *
 * Initialize SDR register file.
 */
static void initialize_reg_file(void)
{
	/* Initialize the register file with the correct data */
	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
	writel(0, &sdr_reg_file->debug_data_addr);
	writel(0, &sdr_reg_file->cur_stage);
	writel(0, &sdr_reg_file->fom);
	writel(0, &sdr_reg_file->failing_stage);
	writel(0, &sdr_reg_file->debug1);
	writel(0, &sdr_reg_file->debug2);
}
/**
 * initialize_hps_phy() - Initialize HPS PHY
 *
 * Initialize HPS PHY.
 */
static void initialize_hps_phy(void)
{
	uint32_t reg;
	/*
	 * Tracking also gets configured here because it's in the
	 * same register.
	 */
	uint32_t trk_sample_count = 7500;
	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in 16 LSB.
	 */

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
	/*
	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
	 */
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
		trk_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl0);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
		trk_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
		trk_long_idle_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl1);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
		trk_long_idle_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
	writel(reg, &sdr_ctrl->phy_ctrl2);
}
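
/*
 * Example of the sample-count split above: trk_sample_count = 7500
 * (0x1D4C) fits entirely in SAMPLECOUNT_19_0, so the SAMPLECOUNT_31_20
 * field of phy_ctrl1 receives 7500 >> 20 = 0; the long-idle value packs
 * 10 outer loops in the upper half-word and a sample count of 100 below.
 */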
/**
 * initialize_tracking() - Initialize tracking
 *
 * Initialize the register file with usable initial data.
 */
static void initialize_tracking(void)
{
	/*
	 * Initialize the register file with the correct data.
	 * Compute usable version of value in case we skip full
	 * computation later.
	 */
	writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
	       &sdr_reg_file->dtaps_per_ptap);

	/* trk_sample_count */
	writel(7500, &sdr_reg_file->trk_sample_count);

	/* longidle outer loop [15:0] */
	writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);

	/*
	 * longidle sample count [31:24]
	 * trfc, worst case of 933Mhz 4Gb [23:16]
	 * trcd, worst case [15:8]
	 * vfifo wait [7:0]
	 */
	writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
	       &sdr_reg_file->delays);

	/* mux delay */
	writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
	       (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
	       &sdr_reg_file->trk_rw_mgr_addr);

	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
	       &sdr_reg_file->trk_read_dqs_width);

	/* trefi [7:0] */
	writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
	       &sdr_reg_file->trk_rfsh);
}
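
/*
 * Example of the delays packing above: (243 << 24) | (14 << 16) |
 * (10 << 8) | (4 << 0) = 0xF30E0A04, i.e. long-idle sample count 243,
 * worst-case trfc 14, worst-case trcd 10 and vfifo wait 4 in one register.
 */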
int sdram_calibration_full(void)
{
	struct param_type my_param;
	struct gbl_type my_gbl;
	uint32_t pass;

	memset(&my_param, 0, sizeof(my_param));
	memset(&my_gbl, 0, sizeof(my_gbl));

	param = &my_param;
	gbl = &my_gbl;

	/* Set the calibration enabled by default */
	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Only sweep all groups (regardless of fail state) by default
	 * Set enabled read test by default.
	 */
#if DISABLE_GUARANTEED_READ
	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
#endif
	/* Initialize the register file */
	initialize_reg_file();

	/* Initialize any PHY CSR */
	initialize_hps_phy();

	scc_mgr_initialize();

	initialize_tracking();

	printf("%s: Preparing to start memory calibration\n", __FILE__);

	debug("%s:%d\n", __func__, __LINE__);
	debug_cond(DLEVEL == 1,
		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
	debug_cond(DLEVEL == 1,
		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
	debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
		   IO_IO_OUT2_DELAY_MAX);
	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);

	hc_initialize_rom_data();

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_NIL);
	reg_file_set_group(0);

	/*
	 * Load global needed for those actions that require
	 * some dynamic calibration support.
	 */
	dyn_calib_steps = STATIC_CALIB_STEPS;
	/*
	 * Load global to allow dynamic selection of delay loop settings
	 * based on calibration mode.
	 */
	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
		skip_delay_mask = 0xff;
	else
		skip_delay_mask = 0x0;

	pass = run_mem_calibrate();
	debug_mem_calibrate(pass);
	return pass;
}