// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_hw_40xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

#include <linux/dmi.h>
#define TILE_MAX_NUM                 6
#define TILE_MAX_MASK                0x3f

#define LNL_HW_ID                    0x4040

#define SKU_TILE_SHIFT               0u
#define SKU_TILE_MASK                0x0000ffffu
#define SKU_HW_ID_SHIFT              16u
#define SKU_HW_ID_MASK               0xffff0000u

#define PLL_CONFIG_DEFAULT           0x1
#define PLL_CDYN_DEFAULT             0x80
#define PLL_EPP_DEFAULT              0x80
#define PLL_REF_CLK_FREQ             (50 * 1000000)
#define PLL_RATIO_TO_FREQ(x)         ((x) * PLL_REF_CLK_FREQ)
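/* PLL ratios are expressed in multiples of the 50 MHz reference clock. */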
#define PLL_PROFILING_FREQ_DEFAULT   38400000
#define PLL_PROFILING_FREQ_HIGH      400000000

#define TIM_SAFE_ENABLE              0xf1d0dead
#define TIM_WATCHDOG_RESET_VALUE     0xffffffff

#define TIMEOUT_US                   (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US               (1500 * USEC_PER_MSEC)

#define WEIGHTS_DEFAULT              0xf711f711u
#define WEIGHTS_ATS_DEFAULT          0x0000f711u
#define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
                        (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
                        (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
                        (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
                        (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
                        (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
                        (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))

#define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
                        (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
                        (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))

#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
                           (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
                           (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
                           (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
                           (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \
                           (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR)))

#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
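/*
 * In the buttress LOCAL_INT_MASK register a set bit masks the interrupt,
 * hence the enable mask is the bitwise complement of BUTTRESS_IRQ_MASK and
 * the disable mask is all ones.
 */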
#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
                                     (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
                                     (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
                                     (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
                                     (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
                                     (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
                                     (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static char *ivpu_platform_to_str(u32 platform)
{
        switch (platform) {
        case IVPU_PLATFORM_SILICON:
                return "IVPU_PLATFORM_SILICON";
        case IVPU_PLATFORM_SIMICS:
                return "IVPU_PLATFORM_SIMICS";
        case IVPU_PLATFORM_FPGA:
                return "IVPU_PLATFORM_FPGA";
        default:
                return "Invalid platform";
        }
}
static const struct dmi_system_id ivpu_dmi_platform_simulation[] = {
        {
                .ident = "Intel Simics",
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
                        DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
                        DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
                },
        },
        {
                .ident = "Intel Simics",
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "Simics"),
                },
        },
        { }
};
static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
        if (dmi_check_system(ivpu_dmi_platform_simulation))
                vdev->platform = IVPU_PLATFORM_SIMICS;
        else
                vdev->platform = IVPU_PLATFORM_SILICON;

        ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
                 ivpu_platform_to_str(vdev->platform), vdev->platform);
}
static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
        vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
        vdev->wa.clear_runtime_mem = false;

        if (ivpu_hw_gen(vdev) == IVPU_HW_40XX)
                vdev->wa.disable_clock_relinquish = true;
}
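/*
 * Timeouts are platform dependent: the FPGA model in particular needs much
 * longer boot/JSM/TDR timeouts than silicon or Simics.
 */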
static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
{
        if (ivpu_is_fpga(vdev)) {
                vdev->timeout.boot = 100000;
                vdev->timeout.jsm = 50000;
                vdev->timeout.tdr = 2000000;
                vdev->timeout.reschedule_suspend = 1000;
        } else if (ivpu_is_simics(vdev)) {
                vdev->timeout.boot = 50;
                vdev->timeout.jsm = 500;
                vdev->timeout.tdr = 10000;
                vdev->timeout.reschedule_suspend = 10;
        } else {
                vdev->timeout.boot = 1000;
                vdev->timeout.jsm = 500;
                vdev->timeout.tdr = 2000;
                vdev->timeout.reschedule_suspend = 10;
        }
}
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
        return REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
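/*
 * Send a workpoint (WP) request to the buttress: the requested ratio range,
 * target ratio, EPP, config and CDYN values are written to the WP_REQ_PAYLOAD
 * registers, then the SEND bit triggers the request. The SEND bit is polled
 * before and after to make sure no other request is in flight.
 */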
static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
                             u16 target_ratio, u16 epp, u16 config, u16 cdyn)
{
        int ret;
        u32 val;

        ret = ivpu_pll_wait_for_cmd_send(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
                return ret;
        }

        val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0);
        val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
        val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
        REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val);

        val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1);
        val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
        val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val);
        REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val);

        val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2);
        val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
        val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val);
        REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val);

        val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD);
        val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val);
        REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val);

        ret = ivpu_pll_wait_for_cmd_send(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);

        return ret;
}
static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
{
        return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
static int ivpu_wait_for_clock_own_resource_ack(struct ivpu_device *vdev)
{
        if (ivpu_is_simics(vdev))
                return 0;

        return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
}
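/*
 * Derive the PLL ratio limits: the ivpu_pll_min_ratio and ivpu_pll_max_ratio
 * module parameters are clamped to the range reported by the FMIN/FMAX fuses,
 * and the fused PN (nominal) ratio is clamped to the resulting [min, max] range.
 */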
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio;
        u32 fmin_fuse, fmax_fuse;

        fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE);
        fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
        fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);

        fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE);
        fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);

        hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
        hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
        hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
{
        u16 config = enable ? PLL_CONFIG_DEFAULT : 0;
        u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0;
        u16 epp = enable ? PLL_EPP_DEFAULT : 0;
        struct ivpu_hw_info *hw = vdev->hw;
        u16 target_ratio = hw->pll.pn_ratio;
        int ret;

        ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n",
                 PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn);

        ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio,
                                target_ratio, epp, config, cdyn);
        if (ret) {
                ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
                return ret;
        }

        if (enable) {
                ret = ivpu_pll_wait_for_status_ready(vdev);
                if (ret) {
                        ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
                        return ret;
                }
        }

        return 0;
}
static int ivpu_pll_enable(struct ivpu_device *vdev)
{
        return ivpu_pll_drive(vdev, true);
}

static int ivpu_pll_disable(struct ivpu_device *vdev)
{
        return ivpu_pll_drive(vdev, false);
}
static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);

        if (enable) {
                val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
                val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
                val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
        } else {
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
        }

        REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
}
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);

        if (enable) {
                val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
                val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
                val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
        } else {
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
        }

        REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
}
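/*
 * The *_qreqn/_qacceptn/_qdeny helpers below verify the NOC Q-channel
 * handshake: after a request is driven through a QREQN register, the
 * expected QACCEPTN/QDENY state is checked before the interface is
 * considered active or quiesced.
 */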
static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

        if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}
static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);

        if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}
static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);

        if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
                return -EIO;

        return 0;
}
static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);

        if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}
static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);

        if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}
static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);

        if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
            !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
                return -EIO;

        return 0;
}
static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);

        if (enable)
                val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
        else
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);

        REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
}
static int ivpu_boot_host_ss_check(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_boot_noc_qreqn_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed qdeny check %d\n", ret);

        return ret;
}
static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);
        if (enable)
                val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
        else
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
        REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);

        ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
                return ret;
        }

        if (enable) {
                REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
                REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
        }

        return ret;
}
static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
{
        return ivpu_boot_host_ss_axi_drive(vdev, true);
}
static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
        if (enable) {
                val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
                val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
        } else {
                val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
                val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
        }
        REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);

        ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

        return ret;
}
static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
{
        return ivpu_boot_host_ss_top_noc_drive(vdev, true);
}
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

        if (enable)
                val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
        else
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);

        REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);

        if (enable)
                val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
        else
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);

        REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
        if (ivpu_is_fpga(vdev))
                return 0;

        return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU,
                             exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);

        if (enable)
                val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
        else
                val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);

        REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);

        val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
        val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
        val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);

        REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
        u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);

        val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
        val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
        val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
        val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
        val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
        val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

        REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
}
static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);

        if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val))
                return -EIO;

        return 0;
}
static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
        u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);

        if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val))
                return -EIO;

        return 0;
}
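/*
 * Power up the CSS CPU power island: wait for the clock resource ownership
 * ACK, enable trickle and then full island power, wait for the island status,
 * verify the TOP NOC QREQN state, enable clocks, release resets and finally
 * drop the island isolation.
 */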
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_wait_for_clock_own_resource_ack(vdev);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for clock own resource ACK\n");
                return ret;
        }

        ivpu_boot_pwr_island_trickle_drive(vdev, true);
        ivpu_boot_pwr_island_drive(vdev, true);

        ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for power island status\n");
                return ret;
        }

        ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qrenqn check %d\n", ret);
                return ret;
        }

        ivpu_boot_host_ss_clk_drive(vdev, true);
        ivpu_boot_host_ss_rst_drive(vdev, true);
        ivpu_boot_pwr_island_isolation_drive(vdev, false);

        return ret;
}
static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);
        if (enable)
                val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
        else
                val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val);
        REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val);

        ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
        if (ret) {
                ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0);
        if (ret)
                ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

        return ret;
}
static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev)
{
        return ivpu_boot_soc_cpu_drive(vdev, true);
}
static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
{
        int ret;
        u32 val;
        u64 val64;

        ret = ivpu_boot_soc_cpu_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
                return ret;
        }

        val64 = vdev->fw->entry_point;
        val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
        REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);

        val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
        val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
        REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);

        ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
                 ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume");

        return 0;
}
static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
                return ret;
        }

        val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL);
        if (enable)
                val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
        else
                val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val);
        REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val);

        ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

        return ret;
}
static bool ivpu_tile_disable_check(u32 config)
{
        /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
        if (config == 0)
                return true;

        if (config > BIT(TILE_MAX_NUM - 1))
                return false;

        if ((config & (config - 1)) == 0)
                return true;

        return false;
}
static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u32 tile_disable;
        u32 tile_enable;
        u32 fuse;

        fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
        if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) {
                ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
                return -EIO;
        }

        tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse);
        if (!ivpu_tile_disable_check(tile_disable)) {
                ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable);
                return -EIO;
        }

        if (tile_disable)
                ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
                         TILE_MAX_NUM - 1, ffs(tile_disable) - 1);
        else
                ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);

        tile_enable = (~tile_disable) & TILE_MAX_MASK;

        hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
        hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
        hw->tile_fuse = tile_disable;
        hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;

        ivpu_pll_init_frequency_ratios(vdev);

        ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
        ivpu_hw_init_range(&vdev->hw->ranges.user,   0x80000000, SZ_256M);
        ivpu_hw_init_range(&vdev->hw->ranges.shave,  0x80000000 + SZ_256M, SZ_2G - SZ_256M);
        ivpu_hw_init_range(&vdev->hw->ranges.dma,   0x200000000, SZ_8G);

        return 0;
}
static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
{
        int ret;
        u32 val;

        ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
                return ret;
        }

        val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET);
        val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val);
        REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val);

        ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Timed out waiting for RESET completion\n");

        return ret;
}
static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
{
        int ret;

        if (IVPU_WA(punit_disabled))
                return 0;

        ret = ivpu_boot_d0i3_drive(vdev, true);
        if (ret)
                ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

        udelay(5); /* VPU requires 5 us to complete the transition */

        return ret;
}
static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev)
{
        int ret;

        if (IVPU_WA(punit_disabled))
                return 0;

        ret = ivpu_boot_d0i3_drive(vdev, false);
        if (ret)
                ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

        return ret;
}
static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev)
{
        u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);

        if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
                val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);
        else
                val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val);

        REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
}
static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev)
{
        ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
                 REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enable" : "Disable");
}
static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev)
{
        u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);

        val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
        REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val);
}
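/*
 * Bring the device up: IP reset, platform/WA/timeout detection, D0i3 exit,
 * PLL enable, host subsystem checks, power domain enable, then the AXI and
 * TOP NOC interfaces.
 */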
static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_hw_40xx_reset(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to reset HW: %d\n", ret);
                return ret;
        }

        ivpu_hw_read_platform(vdev);
        ivpu_hw_wa_init(vdev);
        ivpu_hw_timeouts_init(vdev);

        ret = ivpu_hw_40xx_d0i3_disable(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

        ret = ivpu_pll_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
                return ret;
        }

        if (IVPU_WA(disable_clock_relinquish))
                ivpu_hw_40xx_clock_relinquish_disable(vdev);
        ivpu_hw_40xx_profiling_freq_reg_set(vdev);
        ivpu_hw_40xx_ats_print(vdev);

        ret = ivpu_boot_host_ss_check(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
                return ret;
        }

        ivpu_boot_idle_gen_drive(vdev, false);

        ret = ivpu_boot_pwr_domain_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_host_ss_axi_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
                return ret;
        }

        ret = ivpu_boot_host_ss_top_noc_enable(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

        return ret;
}
static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev)
{
        int ret;

        ivpu_boot_no_snoop_enable(vdev);
        ivpu_boot_tbu_mmu_enable(vdev);

        ret = ivpu_boot_soc_cpu_boot(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

        return ret;
}
static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev)
{
        u32 val;

        if (IVPU_WA(punit_disabled))
                return true;

        val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS);
        return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) &&
               REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
{
        int ret = 0;

        if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
                ivpu_warn(vdev, "Failed to reset the VPU\n");

        if (ivpu_pll_disable(vdev)) {
                ivpu_err(vdev, "Failed to disable PLL\n");
                ret = -EIO;
        }

        if (ivpu_hw_40xx_d0i3_enable(vdev)) {
                ivpu_err(vdev, "Failed to enter D0I3\n");
                ret = -EIO;
        }

        return ret;
}
static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev)
{
        u32 val;

        REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
        REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

        REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
        REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);

        val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
        val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
        REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
/* Register indirect accesses */
static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
        u32 pll_curr_ratio;

        pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ);
        pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK;

        return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
        return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}

static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
        return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}

static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
        return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
        u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;
        u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET);

        REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
        return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
}

static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
        u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);

        return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}

static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
        REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev)
{
        REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}

static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev)
{
        REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
        REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
        REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
        REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
{
        REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
        REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
        REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
        REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
}
static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
        /* TODO: For LNN hang consider engine reset instead of full recovery */
        ivpu_pm_schedule_recovery(vdev);
}

static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
        ivpu_hw_wdt_disable(vdev);
        ivpu_pm_schedule_recovery(vdev);
}

static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
        ivpu_pm_schedule_recovery(vdev);
}
/* Handler for IRQs from VPU core (irqV) */
static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
{
        u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
        irqreturn_t ret = IRQ_NONE;

        if (!status)
                return IRQ_NONE;

        REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
                ivpu_mmu_irq_evtq_handler(vdev);

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
                ret |= ivpu_ipc_irq_handler(vdev);

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
                ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
                ivpu_mmu_irq_gerr_handler(vdev);

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
                ivpu_hw_40xx_irq_wdt_mss_handler(vdev);

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
                ivpu_hw_40xx_irq_wdt_nce_handler(vdev);

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
                ivpu_hw_40xx_irq_noc_firewall_handler(vdev);

        return ret;
}
/* Handler for IRQs from Buttress core (irqB) */
static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
        bool schedule_recovery = false;
        u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;

        if (!status)
                return IRQ_NONE;

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
                ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
                ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
                         REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
                         REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
                REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) {
                ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));
                REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) {
                ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));
                REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) {
                ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
                         REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
                         REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));
                REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) {
                ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
                         REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
                         REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));
                REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) {
                ivpu_err(vdev, "Survivability error detected\n");
                schedule_recovery = true;
        }

        /* This must be done after interrupts are cleared at the source. */
        REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);

        if (schedule_recovery)
                ivpu_pm_schedule_recovery(vdev);

        return IRQ_HANDLED;
}
static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
{
        struct ivpu_device *vdev = ptr;
        irqreturn_t ret = IRQ_NONE;

        REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);

        ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
        ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);

        /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
        REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);

        if (ret & IRQ_WAKE_THREAD)
                return IRQ_WAKE_THREAD;

        return ret;
}
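/* Log any still-pending VPU and buttress interrupt/error status to help diagnose a failure. */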
static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
{
        u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
        u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;

        if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev))
                ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
                ivpu_err(vdev, "WDT MSS timeout detected\n");

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
                ivpu_err(vdev, "WDT NCE timeout detected\n");

        if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
                ivpu_err(vdev, "NOC Firewall irq detected\n");

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) {
                ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
                         REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1),
                         REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2));
        }

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb))
                ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG));

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb))
                ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG));

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb))
                ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
                         REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW),
                         REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH));

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb))
                ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
                         REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW),
                         REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH));

        if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb))
                ivpu_err(vdev, "Survivability error detected\n");
}
const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
        .info_init = ivpu_hw_40xx_info_init,
        .power_up = ivpu_hw_40xx_power_up,
        .is_idle = ivpu_hw_40xx_is_idle,
        .power_down = ivpu_hw_40xx_power_down,
        .reset = ivpu_hw_40xx_reset,
        .boot_fw = ivpu_hw_40xx_boot_fw,
        .wdt_disable = ivpu_hw_40xx_wdt_disable,
        .diagnose_failure = ivpu_hw_40xx_diagnose_failure,
        .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
        .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
        .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
        .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
        .reg_db_set = ivpu_hw_40xx_reg_db_set,
        .reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get,
        .reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get,
        .reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set,
        .irq_clear = ivpu_hw_40xx_irq_clear,
        .irq_enable = ivpu_hw_40xx_irq_enable,
        .irq_disable = ivpu_hw_40xx_irq_disable,
        .irq_handler = ivpu_hw_40xx_irq_handler,
};