1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <linux/acpi.h>
5 #include <linux/bitops.h>
6 #include <linux/debugfs.h>
7 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/seq_file.h>
13 #include <linux/topology.h>
14 #include <linux/uacce.h>
17 #define PCI_DEVICE_ID_ZIP_PF 0xa250
18 #define PCI_DEVICE_ID_ZIP_VF 0xa251
20 #define HZIP_VF_NUM 63
21 #define HZIP_QUEUE_NUM_V1 4096
22 #define HZIP_QUEUE_NUM_V2 1024
24 #define HZIP_CLOCK_GATE_CTRL 0x301004
25 #define COMP0_ENABLE BIT(0)
26 #define COMP1_ENABLE BIT(1)
27 #define DECOMP0_ENABLE BIT(2)
28 #define DECOMP1_ENABLE BIT(3)
29 #define DECOMP2_ENABLE BIT(4)
30 #define DECOMP3_ENABLE BIT(5)
31 #define DECOMP4_ENABLE BIT(6)
32 #define DECOMP5_ENABLE BIT(7)
33 #define ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
34 DECOMP0_ENABLE | DECOMP1_ENABLE | \
35 DECOMP2_ENABLE | DECOMP3_ENABLE | \
36 DECOMP4_ENABLE | DECOMP5_ENABLE)
37 #define DECOMP_CHECK_ENABLE BIT(16)
38 #define HZIP_FSM_MAX_CNT 0x301008
40 #define HZIP_PORT_ARCA_CHE_0 0x301040
41 #define HZIP_PORT_ARCA_CHE_1 0x301044
42 #define HZIP_PORT_AWCA_CHE_0 0x301060
43 #define HZIP_PORT_AWCA_CHE_1 0x301064
44 #define CACHE_ALL_EN 0xffffffff
46 #define HZIP_BD_RUSER_32_63 0x301110
47 #define HZIP_SGL_RUSER_32_63 0x30111c
48 #define HZIP_DATA_RUSER_32_63 0x301128
49 #define HZIP_DATA_WUSER_32_63 0x301134
50 #define HZIP_BD_WUSER_32_63 0x301140
52 #define HZIP_QM_IDEL_STATUS 0x3040e4
54 #define HZIP_CORE_DEBUG_COMP_0 0x302000
55 #define HZIP_CORE_DEBUG_COMP_1 0x303000
56 #define HZIP_CORE_DEBUG_DECOMP_0 0x304000
57 #define HZIP_CORE_DEBUG_DECOMP_1 0x305000
58 #define HZIP_CORE_DEBUG_DECOMP_2 0x306000
59 #define HZIP_CORE_DEBUG_DECOMP_3 0x307000
60 #define HZIP_CORE_DEBUG_DECOMP_4 0x308000
61 #define HZIP_CORE_DEBUG_DECOMP_5 0x309000
63 #define HZIP_CORE_INT_SOURCE 0x3010A0
64 #define HZIP_CORE_INT_MASK_REG 0x3010A4
65 #define HZIP_CORE_INT_SET 0x3010A8
66 #define HZIP_CORE_INT_STATUS 0x3010AC
67 #define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
68 #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
69 #define HZIP_CORE_INT_RAS_CE_ENB 0x301160
70 #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
71 #define HZIP_CORE_INT_RAS_FE_ENB 0x301168
72 #define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
73 #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
74 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
75 #define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0)
76 #define HZIP_COMP_CORE_NUM 2
77 #define HZIP_DECOMP_CORE_NUM 6
/* total core count = compression cores + decompression cores */
#define HZIP_CORE_NUM			(HZIP_COMP_CORE_NUM + \
					 HZIP_DECOMP_CORE_NUM)
80 #define HZIP_SQE_SIZE 128
81 #define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH)
82 #define HZIP_PF_DEF_Q_NUM 64
83 #define HZIP_PF_DEF_Q_BASE 0
85 #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
86 #define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
87 #define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
88 #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
89 #define HZIP_WR_PORT BIT(11)
91 #define HZIP_BUF_SIZE 22
92 #define HZIP_SQE_MASK_OFFSET 64
93 #define HZIP_SQE_MASK_LEN 48
95 static const char hisi_zip_name
[] = "hisi_zip";
96 static struct dentry
*hzip_debugfs_root
;
97 static struct hisi_qm_list zip_devices
;
99 struct hisi_zip_hw_error
{
104 struct zip_dfx_item
{
109 static struct zip_dfx_item zip_dfx_files
[] = {
110 {"send_cnt", offsetof(struct hisi_zip_dfx
, send_cnt
)},
111 {"recv_cnt", offsetof(struct hisi_zip_dfx
, recv_cnt
)},
112 {"send_busy_cnt", offsetof(struct hisi_zip_dfx
, send_busy_cnt
)},
113 {"err_bd_cnt", offsetof(struct hisi_zip_dfx
, err_bd_cnt
)},
116 static const struct hisi_zip_hw_error zip_hw_error
[] = {
117 { .int_msk
= BIT(0), .msg
= "zip_ecc_1bitt_err" },
118 { .int_msk
= BIT(1), .msg
= "zip_ecc_2bit_err" },
119 { .int_msk
= BIT(2), .msg
= "zip_axi_rresp_err" },
120 { .int_msk
= BIT(3), .msg
= "zip_axi_bresp_err" },
121 { .int_msk
= BIT(4), .msg
= "zip_src_addr_parse_err" },
122 { .int_msk
= BIT(5), .msg
= "zip_dst_addr_parse_err" },
123 { .int_msk
= BIT(6), .msg
= "zip_pre_in_addr_err" },
124 { .int_msk
= BIT(7), .msg
= "zip_pre_in_data_err" },
125 { .int_msk
= BIT(8), .msg
= "zip_com_inf_err" },
126 { .int_msk
= BIT(9), .msg
= "zip_enc_inf_err" },
127 { .int_msk
= BIT(10), .msg
= "zip_pre_out_err" },
131 enum ctrl_debug_file_index
{
137 static const char * const ctrl_debug_file_name
[] = {
138 [HZIP_CURRENT_QM
] = "current_qm",
139 [HZIP_CLEAR_ENABLE
] = "clear_enable",
142 struct ctrl_debug_file
{
143 enum ctrl_debug_file_index index
;
145 struct hisi_zip_ctrl
*ctrl
;
149 * One ZIP controller has one PF and multiple VFs, some global configurations
150 * which PF has need this structure.
152 * Just relevant for PF.
154 struct hisi_zip_ctrl
{
155 struct hisi_zip
*hisi_zip
;
156 struct dentry
*debug_root
;
157 struct ctrl_debug_file files
[HZIP_DEBUG_FILE_NUM
];
171 static const u64 core_offsets
[] = {
172 [HZIP_COMP_CORE0
] = 0x302000,
173 [HZIP_COMP_CORE1
] = 0x303000,
174 [HZIP_DECOMP_CORE0
] = 0x304000,
175 [HZIP_DECOMP_CORE1
] = 0x305000,
176 [HZIP_DECOMP_CORE2
] = 0x306000,
177 [HZIP_DECOMP_CORE3
] = 0x307000,
178 [HZIP_DECOMP_CORE4
] = 0x308000,
179 [HZIP_DECOMP_CORE5
] = 0x309000,
182 static const struct debugfs_reg32 hzip_dfx_regs
[] = {
183 {"HZIP_GET_BD_NUM ", 0x00ull
},
184 {"HZIP_GET_RIGHT_BD ", 0x04ull
},
185 {"HZIP_GET_ERROR_BD ", 0x08ull
},
186 {"HZIP_DONE_BD_NUM ", 0x0cull
},
187 {"HZIP_WORK_CYCLE ", 0x10ull
},
188 {"HZIP_IDLE_CYCLE ", 0x18ull
},
189 {"HZIP_MAX_DELAY ", 0x20ull
},
190 {"HZIP_MIN_DELAY ", 0x24ull
},
191 {"HZIP_AVG_DELAY ", 0x28ull
},
192 {"HZIP_MEM_VISIBLE_DATA ", 0x30ull
},
193 {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull
},
194 {"HZIP_COMSUMED_BYTE ", 0x38ull
},
195 {"HZIP_PRODUCED_BYTE ", 0x40ull
},
196 {"HZIP_COMP_INF ", 0x70ull
},
197 {"HZIP_PRE_OUT ", 0x78ull
},
198 {"HZIP_BD_RD ", 0x7cull
},
199 {"HZIP_BD_WR ", 0x80ull
},
200 {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull
},
201 {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull
},
202 {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull
},
203 {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull
},
204 {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull
},
207 static int pf_q_num_set(const char *val
, const struct kernel_param
*kp
)
209 return q_num_set(val
, kp
, PCI_DEVICE_ID_ZIP_PF
);
212 static const struct kernel_param_ops pf_q_num_ops
= {
214 .get
= param_get_int
,
217 static u32 pf_q_num
= HZIP_PF_DEF_Q_NUM
;
218 module_param_cb(pf_q_num
, &pf_q_num_ops
, &pf_q_num
, 0444);
219 MODULE_PARM_DESC(pf_q_num
, "Number of queues in PF(v1 1-4096, v2 1-1024)");
221 static const struct kernel_param_ops vfs_num_ops
= {
223 .get
= param_get_int
,
227 module_param_cb(vfs_num
, &vfs_num_ops
, &vfs_num
, 0444);
228 MODULE_PARM_DESC(vfs_num
, "Number of VFs to enable(1-63), 0(default)");
230 static const struct pci_device_id hisi_zip_dev_ids
[] = {
231 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI
, PCI_DEVICE_ID_ZIP_PF
) },
232 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI
, PCI_DEVICE_ID_ZIP_VF
) },
235 MODULE_DEVICE_TABLE(pci
, hisi_zip_dev_ids
);
237 int zip_create_qps(struct hisi_qp
**qps
, int qp_num
)
239 int node
= cpu_to_node(smp_processor_id());
241 return hisi_qm_alloc_qps_node(&zip_devices
, qp_num
, 0, node
, qps
);
244 static int hisi_zip_set_user_domain_and_cache(struct hisi_qm
*qm
)
246 void __iomem
*base
= qm
->io_base
;
249 writel(AXUSER_BASE
, base
+ QM_ARUSER_M_CFG_1
);
250 writel(ARUSER_M_CFG_ENABLE
, base
+ QM_ARUSER_M_CFG_ENABLE
);
251 writel(AXUSER_BASE
, base
+ QM_AWUSER_M_CFG_1
);
252 writel(AWUSER_M_CFG_ENABLE
, base
+ QM_AWUSER_M_CFG_ENABLE
);
253 writel(WUSER_M_CFG_ENABLE
, base
+ QM_WUSER_M_CFG_ENABLE
);
256 writel(AXI_M_CFG
, base
+ QM_AXI_M_CFG
);
257 writel(AXI_M_CFG_ENABLE
, base
+ QM_AXI_M_CFG_ENABLE
);
258 /* disable FLR triggered by BME(bus master enable) */
259 writel(PEH_AXUSER_CFG
, base
+ QM_PEH_AXUSER_CFG
);
260 writel(PEH_AXUSER_CFG_ENABLE
, base
+ QM_PEH_AXUSER_CFG_ENABLE
);
263 writel(CACHE_ALL_EN
, base
+ HZIP_PORT_ARCA_CHE_0
);
264 writel(CACHE_ALL_EN
, base
+ HZIP_PORT_ARCA_CHE_1
);
265 writel(CACHE_ALL_EN
, base
+ HZIP_PORT_AWCA_CHE_0
);
266 writel(CACHE_ALL_EN
, base
+ HZIP_PORT_AWCA_CHE_1
);
268 /* user domain configurations */
269 writel(AXUSER_BASE
, base
+ HZIP_BD_RUSER_32_63
);
270 writel(AXUSER_BASE
, base
+ HZIP_SGL_RUSER_32_63
);
271 writel(AXUSER_BASE
, base
+ HZIP_BD_WUSER_32_63
);
274 writel(AXUSER_BASE
| AXUSER_SSV
, base
+ HZIP_DATA_RUSER_32_63
);
275 writel(AXUSER_BASE
| AXUSER_SSV
, base
+ HZIP_DATA_WUSER_32_63
);
277 writel(AXUSER_BASE
, base
+ HZIP_DATA_RUSER_32_63
);
278 writel(AXUSER_BASE
, base
+ HZIP_DATA_WUSER_32_63
);
281 /* let's open all compression/decompression cores */
282 writel(DECOMP_CHECK_ENABLE
| ALL_COMP_DECOMP_EN
,
283 base
+ HZIP_CLOCK_GATE_CTRL
);
285 /* enable sqc writeback */
286 writel(SQC_CACHE_ENABLE
| CQC_CACHE_ENABLE
| SQC_CACHE_WB_ENABLE
|
287 CQC_CACHE_WB_ENABLE
| FIELD_PREP(SQC_CACHE_WB_THRD
, 1) |
288 FIELD_PREP(CQC_CACHE_WB_THRD
, 1), base
+ QM_CACHE_CTL
);
293 static void hisi_zip_hw_error_enable(struct hisi_qm
*qm
)
297 if (qm
->ver
== QM_HW_V1
) {
298 writel(HZIP_CORE_INT_MASK_ALL
,
299 qm
->io_base
+ HZIP_CORE_INT_MASK_REG
);
300 dev_info(&qm
->pdev
->dev
, "Does not support hw error handle\n");
304 /* clear ZIP hw error source if having */
305 writel(HZIP_CORE_INT_MASK_ALL
, qm
->io_base
+ HZIP_CORE_INT_SOURCE
);
307 /* configure error type */
308 writel(0x1, qm
->io_base
+ HZIP_CORE_INT_RAS_CE_ENB
);
309 writel(0x0, qm
->io_base
+ HZIP_CORE_INT_RAS_FE_ENB
);
310 writel(HZIP_CORE_INT_RAS_NFE_ENABLE
,
311 qm
->io_base
+ HZIP_CORE_INT_RAS_NFE_ENB
);
313 /* enable ZIP hw error interrupts */
314 writel(0, qm
->io_base
+ HZIP_CORE_INT_MASK_REG
);
316 /* enable ZIP block master OOO when m-bit error occur */
317 val
= readl(qm
->io_base
+ HZIP_SOFT_CTRL_ZIP_CONTROL
);
318 val
= val
| HZIP_AXI_SHUTDOWN_ENABLE
;
319 writel(val
, qm
->io_base
+ HZIP_SOFT_CTRL_ZIP_CONTROL
);
322 static void hisi_zip_hw_error_disable(struct hisi_qm
*qm
)
326 /* disable ZIP hw error interrupts */
327 writel(HZIP_CORE_INT_MASK_ALL
, qm
->io_base
+ HZIP_CORE_INT_MASK_REG
);
329 /* disable ZIP block master OOO when m-bit error occur */
330 val
= readl(qm
->io_base
+ HZIP_SOFT_CTRL_ZIP_CONTROL
);
331 val
= val
& ~HZIP_AXI_SHUTDOWN_ENABLE
;
332 writel(val
, qm
->io_base
+ HZIP_SOFT_CTRL_ZIP_CONTROL
);
335 static inline struct hisi_qm
*file_to_qm(struct ctrl_debug_file
*file
)
337 struct hisi_zip
*hisi_zip
= file
->ctrl
->hisi_zip
;
339 return &hisi_zip
->qm
;
342 static u32
current_qm_read(struct ctrl_debug_file
*file
)
344 struct hisi_qm
*qm
= file_to_qm(file
);
346 return readl(qm
->io_base
+ QM_DFX_MB_CNT_VF
);
349 static int current_qm_write(struct ctrl_debug_file
*file
, u32 val
)
351 struct hisi_qm
*qm
= file_to_qm(file
);
355 if (val
> qm
->vfs_num
)
358 /* Calculate curr_qm_qp_num and store */
360 qm
->debug
.curr_qm_qp_num
= qm
->qp_num
;
362 vfq_num
= (qm
->ctrl_qp_num
- qm
->qp_num
) / qm
->vfs_num
;
363 if (val
== qm
->vfs_num
)
364 qm
->debug
.curr_qm_qp_num
= qm
->ctrl_qp_num
-
365 qm
->qp_num
- (qm
->vfs_num
- 1) * vfq_num
;
367 qm
->debug
.curr_qm_qp_num
= vfq_num
;
370 writel(val
, qm
->io_base
+ QM_DFX_MB_CNT_VF
);
371 writel(val
, qm
->io_base
+ QM_DFX_DB_CNT_VF
);
374 (readl(qm
->io_base
+ QM_DFX_SQE_CNT_VF_SQN
) & CURRENT_Q_MASK
);
375 writel(tmp
, qm
->io_base
+ QM_DFX_SQE_CNT_VF_SQN
);
378 (readl(qm
->io_base
+ QM_DFX_CQE_CNT_VF_CQN
) & CURRENT_Q_MASK
);
379 writel(tmp
, qm
->io_base
+ QM_DFX_CQE_CNT_VF_CQN
);
384 static u32
clear_enable_read(struct ctrl_debug_file
*file
)
386 struct hisi_qm
*qm
= file_to_qm(file
);
388 return readl(qm
->io_base
+ HZIP_SOFT_CTRL_CNT_CLR_CE
) &
389 SOFT_CTRL_CNT_CLR_CE_BIT
;
392 static int clear_enable_write(struct ctrl_debug_file
*file
, u32 val
)
394 struct hisi_qm
*qm
= file_to_qm(file
);
397 if (val
!= 1 && val
!= 0)
400 tmp
= (readl(qm
->io_base
+ HZIP_SOFT_CTRL_CNT_CLR_CE
) &
401 ~SOFT_CTRL_CNT_CLR_CE_BIT
) | val
;
402 writel(tmp
, qm
->io_base
+ HZIP_SOFT_CTRL_CNT_CLR_CE
);
407 static ssize_t
ctrl_debug_read(struct file
*filp
, char __user
*buf
,
408 size_t count
, loff_t
*pos
)
410 struct ctrl_debug_file
*file
= filp
->private_data
;
411 char tbuf
[HZIP_BUF_SIZE
];
415 spin_lock_irq(&file
->lock
);
416 switch (file
->index
) {
417 case HZIP_CURRENT_QM
:
418 val
= current_qm_read(file
);
420 case HZIP_CLEAR_ENABLE
:
421 val
= clear_enable_read(file
);
424 spin_unlock_irq(&file
->lock
);
427 spin_unlock_irq(&file
->lock
);
428 ret
= sprintf(tbuf
, "%u\n", val
);
429 return simple_read_from_buffer(buf
, count
, pos
, tbuf
, ret
);
432 static ssize_t
ctrl_debug_write(struct file
*filp
, const char __user
*buf
,
433 size_t count
, loff_t
*pos
)
435 struct ctrl_debug_file
*file
= filp
->private_data
;
436 char tbuf
[HZIP_BUF_SIZE
];
443 if (count
>= HZIP_BUF_SIZE
)
446 len
= simple_write_to_buffer(tbuf
, HZIP_BUF_SIZE
- 1, pos
, buf
, count
);
451 if (kstrtoul(tbuf
, 0, &val
))
454 spin_lock_irq(&file
->lock
);
455 switch (file
->index
) {
456 case HZIP_CURRENT_QM
:
457 ret
= current_qm_write(file
, val
);
461 case HZIP_CLEAR_ENABLE
:
462 ret
= clear_enable_write(file
, val
);
470 spin_unlock_irq(&file
->lock
);
475 spin_unlock_irq(&file
->lock
);
479 static const struct file_operations ctrl_debug_fops
= {
480 .owner
= THIS_MODULE
,
482 .read
= ctrl_debug_read
,
483 .write
= ctrl_debug_write
,
487 static int zip_debugfs_atomic64_set(void *data
, u64 val
)
492 atomic64_set((atomic64_t
*)data
, 0);
497 static int zip_debugfs_atomic64_get(void *data
, u64
*val
)
499 *val
= atomic64_read((atomic64_t
*)data
);
504 DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops
, zip_debugfs_atomic64_get
,
505 zip_debugfs_atomic64_set
, "%llu\n");
507 static int hisi_zip_core_debug_init(struct hisi_zip_ctrl
*ctrl
)
509 struct hisi_zip
*hisi_zip
= ctrl
->hisi_zip
;
510 struct hisi_qm
*qm
= &hisi_zip
->qm
;
511 struct device
*dev
= &qm
->pdev
->dev
;
512 struct debugfs_regset32
*regset
;
513 struct dentry
*tmp_d
;
514 char buf
[HZIP_BUF_SIZE
];
517 for (i
= 0; i
< HZIP_CORE_NUM
; i
++) {
518 if (i
< HZIP_COMP_CORE_NUM
)
519 sprintf(buf
, "comp_core%d", i
);
521 sprintf(buf
, "decomp_core%d", i
- HZIP_COMP_CORE_NUM
);
523 regset
= devm_kzalloc(dev
, sizeof(*regset
), GFP_KERNEL
);
527 regset
->regs
= hzip_dfx_regs
;
528 regset
->nregs
= ARRAY_SIZE(hzip_dfx_regs
);
529 regset
->base
= qm
->io_base
+ core_offsets
[i
];
531 tmp_d
= debugfs_create_dir(buf
, ctrl
->debug_root
);
532 debugfs_create_regset32("regs", 0444, tmp_d
, regset
);
538 static void hisi_zip_dfx_debug_init(struct hisi_qm
*qm
)
540 struct hisi_zip
*zip
= container_of(qm
, struct hisi_zip
, qm
);
541 struct hisi_zip_dfx
*dfx
= &zip
->dfx
;
542 struct dentry
*tmp_dir
;
546 tmp_dir
= debugfs_create_dir("zip_dfx", qm
->debug
.debug_root
);
547 for (i
= 0; i
< ARRAY_SIZE(zip_dfx_files
); i
++) {
548 data
= (atomic64_t
*)((uintptr_t)dfx
+ zip_dfx_files
[i
].offset
);
549 debugfs_create_file(zip_dfx_files
[i
].name
,
557 static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl
*ctrl
)
561 for (i
= HZIP_CURRENT_QM
; i
< HZIP_DEBUG_FILE_NUM
; i
++) {
562 spin_lock_init(&ctrl
->files
[i
].lock
);
563 ctrl
->files
[i
].ctrl
= ctrl
;
564 ctrl
->files
[i
].index
= i
;
566 debugfs_create_file(ctrl_debug_file_name
[i
], 0600,
567 ctrl
->debug_root
, ctrl
->files
+ i
,
571 return hisi_zip_core_debug_init(ctrl
);
574 static int hisi_zip_debugfs_init(struct hisi_zip
*hisi_zip
)
576 struct hisi_qm
*qm
= &hisi_zip
->qm
;
577 struct device
*dev
= &qm
->pdev
->dev
;
578 struct dentry
*dev_d
;
581 dev_d
= debugfs_create_dir(dev_name(dev
), hzip_debugfs_root
);
583 qm
->debug
.sqe_mask_offset
= HZIP_SQE_MASK_OFFSET
;
584 qm
->debug
.sqe_mask_len
= HZIP_SQE_MASK_LEN
;
585 qm
->debug
.debug_root
= dev_d
;
586 ret
= hisi_qm_debug_init(qm
);
588 goto failed_to_create
;
590 if (qm
->fun_type
== QM_HW_PF
) {
591 hisi_zip
->ctrl
->debug_root
= dev_d
;
592 ret
= hisi_zip_ctrl_debug_init(hisi_zip
->ctrl
);
594 goto failed_to_create
;
597 hisi_zip_dfx_debug_init(qm
);
602 debugfs_remove_recursive(hzip_debugfs_root
);
606 static void hisi_zip_debug_regs_clear(struct hisi_zip
*hisi_zip
)
608 struct hisi_qm
*qm
= &hisi_zip
->qm
;
610 writel(0x0, qm
->io_base
+ QM_DFX_MB_CNT_VF
);
611 writel(0x0, qm
->io_base
+ QM_DFX_DB_CNT_VF
);
612 writel(0x0, qm
->io_base
+ HZIP_SOFT_CTRL_CNT_CLR_CE
);
614 hisi_qm_debug_regs_clear(qm
);
617 static void hisi_zip_debugfs_exit(struct hisi_zip
*hisi_zip
)
619 struct hisi_qm
*qm
= &hisi_zip
->qm
;
621 debugfs_remove_recursive(qm
->debug
.debug_root
);
623 if (qm
->fun_type
== QM_HW_PF
)
624 hisi_zip_debug_regs_clear(hisi_zip
);
627 static void hisi_zip_log_hw_error(struct hisi_qm
*qm
, u32 err_sts
)
629 const struct hisi_zip_hw_error
*err
= zip_hw_error
;
630 struct device
*dev
= &qm
->pdev
->dev
;
634 if (err
->int_msk
& err_sts
) {
635 dev_err(dev
, "%s [error status=0x%x] found\n",
636 err
->msg
, err
->int_msk
);
638 if (err
->int_msk
& HZIP_CORE_INT_STATUS_M_ECC
) {
639 err_val
= readl(qm
->io_base
+
640 HZIP_CORE_SRAM_ECC_ERR_INFO
);
641 dev_err(dev
, "hisi-zip multi ecc sram num=0x%x\n",
643 HZIP_SRAM_ECC_ERR_NUM_SHIFT
) & 0xFF));
644 dev_err(dev
, "hisi-zip multi ecc sram addr=0x%x\n",
646 HZIP_SRAM_ECC_ERR_ADDR_SHIFT
));
653 static u32
hisi_zip_get_hw_err_status(struct hisi_qm
*qm
)
655 return readl(qm
->io_base
+ HZIP_CORE_INT_STATUS
);
658 static void hisi_zip_clear_hw_err_status(struct hisi_qm
*qm
, u32 err_sts
)
660 writel(err_sts
, qm
->io_base
+ HZIP_CORE_INT_SOURCE
);
663 static void hisi_zip_open_axi_master_ooo(struct hisi_qm
*qm
)
667 val
= readl(qm
->io_base
+ HZIP_SOFT_CTRL_ZIP_CONTROL
);
669 writel(val
& ~HZIP_AXI_SHUTDOWN_ENABLE
,
670 qm
->io_base
+ HZIP_SOFT_CTRL_ZIP_CONTROL
);
672 writel(val
| HZIP_AXI_SHUTDOWN_ENABLE
,
673 qm
->io_base
+ HZIP_SOFT_CTRL_ZIP_CONTROL
);
676 static void hisi_zip_close_axi_master_ooo(struct hisi_qm
*qm
)
680 /* Disable ECC Mbit error report. */
681 nfe_enb
= readl(qm
->io_base
+ HZIP_CORE_INT_RAS_NFE_ENB
);
682 writel(nfe_enb
& ~HZIP_CORE_INT_STATUS_M_ECC
,
683 qm
->io_base
+ HZIP_CORE_INT_RAS_NFE_ENB
);
685 /* Inject zip ECC Mbit error to block master ooo. */
686 writel(HZIP_CORE_INT_STATUS_M_ECC
,
687 qm
->io_base
+ HZIP_CORE_INT_SET
);
690 static const struct hisi_qm_err_ini hisi_zip_err_ini
= {
691 .hw_init
= hisi_zip_set_user_domain_and_cache
,
692 .hw_err_enable
= hisi_zip_hw_error_enable
,
693 .hw_err_disable
= hisi_zip_hw_error_disable
,
694 .get_dev_hw_err_status
= hisi_zip_get_hw_err_status
,
695 .clear_dev_hw_err_status
= hisi_zip_clear_hw_err_status
,
696 .log_dev_hw_err
= hisi_zip_log_hw_error
,
697 .open_axi_master_ooo
= hisi_zip_open_axi_master_ooo
,
698 .close_axi_master_ooo
= hisi_zip_close_axi_master_ooo
,
702 QM_ACC_WB_NOT_READY_TIMEOUT
,
704 .ecc_2bits_mask
= HZIP_CORE_INT_STATUS_M_ECC
,
705 .msi_wr_port
= HZIP_WR_PORT
,
710 static int hisi_zip_pf_probe_init(struct hisi_zip
*hisi_zip
)
712 struct hisi_qm
*qm
= &hisi_zip
->qm
;
713 struct hisi_zip_ctrl
*ctrl
;
715 ctrl
= devm_kzalloc(&qm
->pdev
->dev
, sizeof(*ctrl
), GFP_KERNEL
);
719 hisi_zip
->ctrl
= ctrl
;
720 ctrl
->hisi_zip
= hisi_zip
;
722 if (qm
->ver
== QM_HW_V1
)
723 qm
->ctrl_qp_num
= HZIP_QUEUE_NUM_V1
;
725 qm
->ctrl_qp_num
= HZIP_QUEUE_NUM_V2
;
727 qm
->err_ini
= &hisi_zip_err_ini
;
729 hisi_zip_set_user_domain_and_cache(qm
);
730 hisi_qm_dev_err_init(qm
);
731 hisi_zip_debug_regs_clear(hisi_zip
);
736 static int hisi_zip_qm_init(struct hisi_qm
*qm
, struct pci_dev
*pdev
)
739 qm
->ver
= pdev
->revision
;
740 qm
->algs
= "zlib\ngzip";
741 qm
->sqe_size
= HZIP_SQE_SIZE
;
742 qm
->dev_name
= hisi_zip_name
;
744 qm
->fun_type
= (pdev
->device
== PCI_DEVICE_ID_ZIP_PF
) ?
746 if (qm
->fun_type
== QM_HW_PF
) {
747 qm
->qp_base
= HZIP_PF_DEF_Q_BASE
;
748 qm
->qp_num
= pf_q_num
;
749 qm
->qm_list
= &zip_devices
;
750 } else if (qm
->fun_type
== QM_HW_VF
&& qm
->ver
== QM_HW_V1
) {
752 * have no way to get qm configure in VM in v1 hardware,
753 * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
754 * to trigger only one VF in v1 hardware.
756 * v2 hardware has no such problem.
758 qm
->qp_base
= HZIP_PF_DEF_Q_NUM
;
759 qm
->qp_num
= HZIP_QUEUE_NUM_V1
- HZIP_PF_DEF_Q_NUM
;
762 return hisi_qm_init(qm
);
765 static int hisi_zip_probe_init(struct hisi_zip
*hisi_zip
)
767 struct hisi_qm
*qm
= &hisi_zip
->qm
;
770 if (qm
->fun_type
== QM_HW_PF
) {
771 ret
= hisi_zip_pf_probe_init(hisi_zip
);
779 static int hisi_zip_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
781 struct hisi_zip
*hisi_zip
;
785 hisi_zip
= devm_kzalloc(&pdev
->dev
, sizeof(*hisi_zip
), GFP_KERNEL
);
791 ret
= hisi_zip_qm_init(qm
, pdev
);
793 pci_err(pdev
, "Failed to init ZIP QM (%d)!\n", ret
);
797 ret
= hisi_zip_probe_init(hisi_zip
);
799 pci_err(pdev
, "Failed to probe (%d)!\n", ret
);
803 ret
= hisi_qm_start(qm
);
807 ret
= hisi_zip_debugfs_init(hisi_zip
);
809 dev_err(&pdev
->dev
, "Failed to init debugfs (%d)!\n", ret
);
811 hisi_qm_add_to_list(qm
, &zip_devices
);
814 ret
= uacce_register(qm
->uacce
);
819 if (qm
->fun_type
== QM_HW_PF
&& vfs_num
> 0) {
820 ret
= hisi_qm_sriov_enable(pdev
, vfs_num
);
822 goto err_remove_from_list
;
827 err_remove_from_list
:
828 hisi_qm_del_from_list(qm
, &zip_devices
);
829 hisi_zip_debugfs_exit(hisi_zip
);
837 static void hisi_zip_remove(struct pci_dev
*pdev
)
839 struct hisi_zip
*hisi_zip
= pci_get_drvdata(pdev
);
840 struct hisi_qm
*qm
= &hisi_zip
->qm
;
842 if (qm
->fun_type
== QM_HW_PF
&& qm
->vfs_num
)
843 hisi_qm_sriov_disable(pdev
);
845 hisi_zip_debugfs_exit(hisi_zip
);
848 hisi_qm_dev_err_uninit(qm
);
850 hisi_qm_del_from_list(qm
, &zip_devices
);
853 static const struct pci_error_handlers hisi_zip_err_handler
= {
854 .error_detected
= hisi_qm_dev_err_detected
,
855 .slot_reset
= hisi_qm_dev_slot_reset
,
856 .reset_prepare
= hisi_qm_reset_prepare
,
857 .reset_done
= hisi_qm_reset_done
,
860 static struct pci_driver hisi_zip_pci_driver
= {
862 .id_table
= hisi_zip_dev_ids
,
863 .probe
= hisi_zip_probe
,
864 .remove
= hisi_zip_remove
,
865 .sriov_configure
= IS_ENABLED(CONFIG_PCI_IOV
) ?
866 hisi_qm_sriov_configure
: NULL
,
867 .err_handler
= &hisi_zip_err_handler
,
870 static void hisi_zip_register_debugfs(void)
872 if (!debugfs_initialized())
875 hzip_debugfs_root
= debugfs_create_dir("hisi_zip", NULL
);
878 static void hisi_zip_unregister_debugfs(void)
880 debugfs_remove_recursive(hzip_debugfs_root
);
883 static int __init
hisi_zip_init(void)
887 hisi_qm_init_list(&zip_devices
);
888 hisi_zip_register_debugfs();
890 ret
= pci_register_driver(&hisi_zip_pci_driver
);
892 pr_err("Failed to register pci driver.\n");
896 ret
= hisi_zip_register_to_crypto();
898 pr_err("Failed to register driver to crypto.\n");
905 pci_unregister_driver(&hisi_zip_pci_driver
);
907 hisi_zip_unregister_debugfs();
912 static void __exit
hisi_zip_exit(void)
914 hisi_zip_unregister_from_crypto();
915 pci_unregister_driver(&hisi_zip_pci_driver
);
916 hisi_zip_unregister_debugfs();
919 module_init(hisi_zip_init
);
920 module_exit(hisi_zip_exit
);
922 MODULE_LICENSE("GPL v2");
923 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
924 MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");