1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
4 #include <linux/acpi.h>
6 #include <linux/bitops.h>
7 #include <linux/debugfs.h>
8 #include <linux/init.h>
10 #include <linux/iommu.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/seq_file.h>
15 #include <linux/topology.h>
20 #define SEC_QUEUE_NUM_V1 4096
21 #define SEC_QUEUE_NUM_V2 1024
22 #define SEC_PF_PCI_DEVICE_ID 0xa255
23 #define SEC_VF_PCI_DEVICE_ID 0xa256
25 #define SEC_XTS_MIV_ENABLE_REG 0x301384
26 #define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF
27 #define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF
28 #define SEC_BD_ERR_CHK_EN1 0xfffff7fd
29 #define SEC_BD_ERR_CHK_EN2 0xffffbfff
31 #define SEC_SQE_SIZE 128
32 #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
33 #define SEC_PF_DEF_Q_NUM 64
34 #define SEC_PF_DEF_Q_BASE 0
35 #define SEC_CTX_Q_NUM_DEF 24
36 #define SEC_CTX_Q_NUM_MAX 32
38 #define SEC_CTRL_CNT_CLR_CE 0x301120
39 #define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
40 #define SEC_ENGINE_PF_CFG_OFF 0x300000
41 #define SEC_ACC_COMMON_REG_OFF 0x1000
42 #define SEC_CORE_INT_SOURCE 0x301010
43 #define SEC_CORE_INT_MASK 0x301000
44 #define SEC_CORE_INT_STATUS 0x301008
45 #define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
46 #define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF)
47 #define SEC_ECC_ADDR(err) ((err) >> 0)
48 #define SEC_CORE_INT_DISABLE 0x0
49 #define SEC_CORE_INT_ENABLE 0x1ff
51 #define SEC_RAS_CE_REG 0x50
52 #define SEC_RAS_FE_REG 0x54
53 #define SEC_RAS_NFE_REG 0x58
54 #define SEC_RAS_CE_ENB_MSK 0x88
55 #define SEC_RAS_FE_ENB_MSK 0x0
56 #define SEC_RAS_NFE_ENB_MSK 0x177
57 #define SEC_RAS_DISABLE 0x0
58 #define SEC_MEM_START_INIT_REG 0x0100
59 #define SEC_MEM_INIT_DONE_REG 0x0104
60 #define SEC_QM_ABNORMAL_INT_MASK 0x100004
62 #define SEC_CONTROL_REG 0x0200
63 #define SEC_TRNG_EN_SHIFT 8
64 #define SEC_CLK_GATE_ENABLE BIT(3)
65 #define SEC_CLK_GATE_DISABLE (~BIT(3))
66 #define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
67 #define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
69 #define SEC_INTERFACE_USER_CTRL0_REG 0x0220
70 #define SEC_INTERFACE_USER_CTRL1_REG 0x0224
71 #define SEC_BD_ERR_CHK_EN_REG1 0x0384
72 #define SEC_BD_ERR_CHK_EN_REG2 0x038c
74 #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
75 #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
76 #define SEC_CORE_INT_STATUS_M_ECC BIT(2)
78 #define SEC_DELAY_10_US 10
79 #define SEC_POLL_TIMEOUT_US 1000
80 #define SEC_VF_CNT_MASK 0xffffffc0
81 #define SEC_DBGFS_VAL_MAX_LEN 20
83 #define SEC_SQE_MASK_OFFSET 64
84 #define SEC_SQE_MASK_LEN 48
86 #define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
87 SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
99 static const char sec_name
[] = "hisi_sec2";
100 static struct dentry
*sec_debugfs_root
;
101 static struct hisi_qm_list sec_devices
;
103 static const struct sec_hw_error sec_hw_errors
[] = {
104 {.int_msk
= BIT(0), .msg
= "sec_axi_rresp_err_rint"},
105 {.int_msk
= BIT(1), .msg
= "sec_axi_bresp_err_rint"},
106 {.int_msk
= BIT(2), .msg
= "sec_ecc_2bit_err_rint"},
107 {.int_msk
= BIT(3), .msg
= "sec_ecc_1bit_err_rint"},
108 {.int_msk
= BIT(4), .msg
= "sec_req_trng_timeout_rint"},
109 {.int_msk
= BIT(5), .msg
= "sec_fsm_hbeat_rint"},
110 {.int_msk
= BIT(6), .msg
= "sec_channel_req_rng_timeout_rint"},
111 {.int_msk
= BIT(7), .msg
= "sec_bd_err_rint"},
112 {.int_msk
= BIT(8), .msg
= "sec_chain_buff_err_rint"},
116 static const char * const sec_dbg_file_name
[] = {
117 [SEC_CURRENT_QM
] = "current_qm",
118 [SEC_CLEAR_ENABLE
] = "clear_enable",
121 static struct sec_dfx_item sec_dfx_labels
[] = {
122 {"send_cnt", offsetof(struct sec_dfx
, send_cnt
)},
123 {"recv_cnt", offsetof(struct sec_dfx
, recv_cnt
)},
124 {"send_busy_cnt", offsetof(struct sec_dfx
, send_busy_cnt
)},
125 {"err_bd_cnt", offsetof(struct sec_dfx
, err_bd_cnt
)},
126 {"invalid_req_cnt", offsetof(struct sec_dfx
, invalid_req_cnt
)},
127 {"done_flag_cnt", offsetof(struct sec_dfx
, done_flag_cnt
)},
130 static const struct debugfs_reg32 sec_dfx_regs
[] = {
131 {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
132 {"SEC_SAA_EN ", 0x301270},
133 {"SEC_BD_LATENCY_MIN ", 0x301600},
134 {"SEC_BD_LATENCY_MAX ", 0x301608},
135 {"SEC_BD_LATENCY_AVG ", 0x30160C},
136 {"SEC_BD_NUM_IN_SAA0 ", 0x301670},
137 {"SEC_BD_NUM_IN_SAA1 ", 0x301674},
138 {"SEC_BD_NUM_IN_SEC ", 0x301680},
139 {"SEC_ECC_1BIT_CNT ", 0x301C00},
140 {"SEC_ECC_1BIT_INFO ", 0x301C04},
141 {"SEC_ECC_2BIT_CNT ", 0x301C10},
142 {"SEC_ECC_2BIT_INFO ", 0x301C14},
143 {"SEC_BD_SAA0 ", 0x301C20},
144 {"SEC_BD_SAA1 ", 0x301C24},
145 {"SEC_BD_SAA2 ", 0x301C28},
146 {"SEC_BD_SAA3 ", 0x301C2C},
147 {"SEC_BD_SAA4 ", 0x301C30},
148 {"SEC_BD_SAA5 ", 0x301C34},
149 {"SEC_BD_SAA6 ", 0x301C38},
150 {"SEC_BD_SAA7 ", 0x301C3C},
151 {"SEC_BD_SAA8 ", 0x301C40},
154 static int sec_pf_q_num_set(const char *val
, const struct kernel_param
*kp
)
156 return q_num_set(val
, kp
, SEC_PF_PCI_DEVICE_ID
);
159 static const struct kernel_param_ops sec_pf_q_num_ops
= {
160 .set
= sec_pf_q_num_set
,
161 .get
= param_get_int
,
164 static u32 pf_q_num
= SEC_PF_DEF_Q_NUM
;
165 module_param_cb(pf_q_num
, &sec_pf_q_num_ops
, &pf_q_num
, 0444);
166 MODULE_PARM_DESC(pf_q_num
, "Number of queues in PF(v1 0-4096, v2 0-1024)");
168 static int sec_ctx_q_num_set(const char *val
, const struct kernel_param
*kp
)
176 ret
= kstrtou32(val
, 10, &ctx_q_num
);
180 if (!ctx_q_num
|| ctx_q_num
> SEC_CTX_Q_NUM_MAX
|| ctx_q_num
& 0x1) {
181 pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num
);
185 return param_set_int(val
, kp
);
188 static const struct kernel_param_ops sec_ctx_q_num_ops
= {
189 .set
= sec_ctx_q_num_set
,
190 .get
= param_get_int
,
192 static u32 ctx_q_num
= SEC_CTX_Q_NUM_DEF
;
193 module_param_cb(ctx_q_num
, &sec_ctx_q_num_ops
, &ctx_q_num
, 0444);
194 MODULE_PARM_DESC(ctx_q_num
, "Queue num in ctx (24 default, 2, 4, ..., 32)");
196 static const struct kernel_param_ops vfs_num_ops
= {
198 .get
= param_get_int
,
202 module_param_cb(vfs_num
, &vfs_num_ops
, &vfs_num
, 0444);
203 MODULE_PARM_DESC(vfs_num
, "Number of VFs to enable(1-63), 0(default)");
/*
 * Release the queue pairs created by sec_create_qps() and the array that
 * holds them.
 * NOTE(review): the kfree() was in a line lost to extraction; restored
 * from upstream (sec_create_qps() kcallocs the array) — confirm.
 */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}
211 struct hisi_qp
**sec_create_qps(void)
213 int node
= cpu_to_node(smp_processor_id());
214 u32 ctx_num
= ctx_q_num
;
215 struct hisi_qp
**qps
;
218 qps
= kcalloc(ctx_num
, sizeof(struct hisi_qp
*), GFP_KERNEL
);
222 ret
= hisi_qm_alloc_qps_node(&sec_devices
, ctx_num
, 0, node
, qps
);
231 static const struct pci_device_id sec_dev_ids
[] = {
232 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI
, SEC_PF_PCI_DEVICE_ID
) },
233 { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI
, SEC_VF_PCI_DEVICE_ID
) },
236 MODULE_DEVICE_TABLE(pci
, sec_dev_ids
);
238 static u8
sec_get_endian(struct hisi_qm
*qm
)
243 * As for VF, it is a wrong way to get endian setting by
244 * reading a register of the engine
246 if (qm
->pdev
->is_virtfn
) {
247 dev_err_ratelimited(&qm
->pdev
->dev
,
248 "cannot access a register in VF!\n");
251 reg
= readl_relaxed(qm
->io_base
+ SEC_ENGINE_PF_CFG_OFF
+
252 SEC_ACC_COMMON_REG_OFF
+ SEC_CONTROL_REG
);
254 /* BD little endian mode */
258 /* BD 32-bits big endian mode */
259 else if (!(reg
& BIT(1)))
262 /* BD 64-bits big endian mode */
267 static int sec_engine_init(struct hisi_qm
*qm
)
272 /* disable clock gate control */
273 reg
= readl_relaxed(SEC_ADDR(qm
, SEC_CONTROL_REG
));
274 reg
&= SEC_CLK_GATE_DISABLE
;
275 writel_relaxed(reg
, SEC_ADDR(qm
, SEC_CONTROL_REG
));
277 writel_relaxed(0x1, SEC_ADDR(qm
, SEC_MEM_START_INIT_REG
));
279 ret
= readl_relaxed_poll_timeout(SEC_ADDR(qm
, SEC_MEM_INIT_DONE_REG
),
280 reg
, reg
& 0x1, SEC_DELAY_10_US
,
281 SEC_POLL_TIMEOUT_US
);
283 dev_err(&qm
->pdev
->dev
, "fail to init sec mem\n");
287 reg
= readl_relaxed(SEC_ADDR(qm
, SEC_CONTROL_REG
));
288 reg
|= (0x1 << SEC_TRNG_EN_SHIFT
);
289 writel_relaxed(reg
, SEC_ADDR(qm
, SEC_CONTROL_REG
));
291 reg
= readl_relaxed(SEC_ADDR(qm
, SEC_INTERFACE_USER_CTRL0_REG
));
292 reg
|= SEC_USER0_SMMU_NORMAL
;
293 writel_relaxed(reg
, SEC_ADDR(qm
, SEC_INTERFACE_USER_CTRL0_REG
));
295 reg
= readl_relaxed(SEC_ADDR(qm
, SEC_INTERFACE_USER_CTRL1_REG
));
296 reg
|= SEC_USER1_SMMU_NORMAL
;
297 writel_relaxed(reg
, SEC_ADDR(qm
, SEC_INTERFACE_USER_CTRL1_REG
));
299 writel_relaxed(SEC_BD_ERR_CHK_EN1
,
300 SEC_ADDR(qm
, SEC_BD_ERR_CHK_EN_REG1
));
301 writel_relaxed(SEC_BD_ERR_CHK_EN2
,
302 SEC_ADDR(qm
, SEC_BD_ERR_CHK_EN_REG2
));
304 /* enable clock gate control */
305 reg
= readl_relaxed(SEC_ADDR(qm
, SEC_CONTROL_REG
));
306 reg
|= SEC_CLK_GATE_ENABLE
;
307 writel_relaxed(reg
, SEC_ADDR(qm
, SEC_CONTROL_REG
));
310 reg
= readl_relaxed(SEC_ADDR(qm
, SEC_CONTROL_REG
));
311 reg
|= sec_get_endian(qm
);
312 writel_relaxed(reg
, SEC_ADDR(qm
, SEC_CONTROL_REG
));
314 /* Enable sm4 xts mode multiple iv */
315 writel_relaxed(SEC_XTS_MIV_ENABLE_MSK
,
316 qm
->io_base
+ SEC_XTS_MIV_ENABLE_REG
);
321 static int sec_set_user_domain_and_cache(struct hisi_qm
*qm
)
324 writel(AXUSER_BASE
, qm
->io_base
+ QM_ARUSER_M_CFG_1
);
325 writel(ARUSER_M_CFG_ENABLE
, qm
->io_base
+ QM_ARUSER_M_CFG_ENABLE
);
326 writel(AXUSER_BASE
, qm
->io_base
+ QM_AWUSER_M_CFG_1
);
327 writel(AWUSER_M_CFG_ENABLE
, qm
->io_base
+ QM_AWUSER_M_CFG_ENABLE
);
328 writel(WUSER_M_CFG_ENABLE
, qm
->io_base
+ QM_WUSER_M_CFG_ENABLE
);
331 writel(AXI_M_CFG
, qm
->io_base
+ QM_AXI_M_CFG
);
332 writel(AXI_M_CFG_ENABLE
, qm
->io_base
+ QM_AXI_M_CFG_ENABLE
);
334 /* disable FLR triggered by BME(bus master enable) */
335 writel(PEH_AXUSER_CFG
, qm
->io_base
+ QM_PEH_AXUSER_CFG
);
336 writel(PEH_AXUSER_CFG_ENABLE
, qm
->io_base
+ QM_PEH_AXUSER_CFG_ENABLE
);
338 /* enable sqc,cqc writeback */
339 writel(SQC_CACHE_ENABLE
| CQC_CACHE_ENABLE
| SQC_CACHE_WB_ENABLE
|
340 CQC_CACHE_WB_ENABLE
| FIELD_PREP(SQC_CACHE_WB_THRD
, 1) |
341 FIELD_PREP(CQC_CACHE_WB_THRD
, 1), qm
->io_base
+ QM_CACHE_CTL
);
343 return sec_engine_init(qm
);
346 /* sec_debug_regs_clear() - clear the sec debug regs */
347 static void sec_debug_regs_clear(struct hisi_qm
*qm
)
349 /* clear current_qm */
350 writel(0x0, qm
->io_base
+ QM_DFX_MB_CNT_VF
);
351 writel(0x0, qm
->io_base
+ QM_DFX_DB_CNT_VF
);
354 writel(0x0, qm
->io_base
+ SEC_CTRL_CNT_CLR_CE
);
356 hisi_qm_debug_regs_clear(qm
);
359 static void sec_hw_error_enable(struct hisi_qm
*qm
)
363 if (qm
->ver
== QM_HW_V1
) {
364 writel(SEC_CORE_INT_DISABLE
, qm
->io_base
+ SEC_CORE_INT_MASK
);
365 dev_info(&qm
->pdev
->dev
, "V1 not support hw error handle\n");
369 val
= readl(qm
->io_base
+ SEC_CONTROL_REG
);
371 /* clear SEC hw error source if having */
372 writel(SEC_CORE_INT_DISABLE
, qm
->io_base
+ SEC_CORE_INT_SOURCE
);
374 /* enable SEC hw error interrupts */
375 writel(SEC_CORE_INT_ENABLE
, qm
->io_base
+ SEC_CORE_INT_MASK
);
378 writel(SEC_RAS_CE_ENB_MSK
, qm
->io_base
+ SEC_RAS_CE_REG
);
379 writel(SEC_RAS_FE_ENB_MSK
, qm
->io_base
+ SEC_RAS_FE_REG
);
380 writel(SEC_RAS_NFE_ENB_MSK
, qm
->io_base
+ SEC_RAS_NFE_REG
);
382 /* enable SEC block master OOO when m-bit error occur */
383 val
= val
| SEC_AXI_SHUTDOWN_ENABLE
;
385 writel(val
, qm
->io_base
+ SEC_CONTROL_REG
);
388 static void sec_hw_error_disable(struct hisi_qm
*qm
)
392 val
= readl(qm
->io_base
+ SEC_CONTROL_REG
);
394 /* disable RAS int */
395 writel(SEC_RAS_DISABLE
, qm
->io_base
+ SEC_RAS_CE_REG
);
396 writel(SEC_RAS_DISABLE
, qm
->io_base
+ SEC_RAS_FE_REG
);
397 writel(SEC_RAS_DISABLE
, qm
->io_base
+ SEC_RAS_NFE_REG
);
399 /* disable SEC hw error interrupts */
400 writel(SEC_CORE_INT_DISABLE
, qm
->io_base
+ SEC_CORE_INT_MASK
);
402 /* disable SEC block master OOO when m-bit error occur */
403 val
= val
& SEC_AXI_SHUTDOWN_DISABLE
;
405 writel(val
, qm
->io_base
+ SEC_CONTROL_REG
);
408 static u32
sec_current_qm_read(struct sec_debug_file
*file
)
410 struct hisi_qm
*qm
= file
->qm
;
412 return readl(qm
->io_base
+ QM_DFX_MB_CNT_VF
);
415 static int sec_current_qm_write(struct sec_debug_file
*file
, u32 val
)
417 struct hisi_qm
*qm
= file
->qm
;
421 if (val
> qm
->vfs_num
)
424 /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
426 qm
->debug
.curr_qm_qp_num
= qm
->qp_num
;
428 vfq_num
= (qm
->ctrl_qp_num
- qm
->qp_num
) / qm
->vfs_num
;
430 if (val
== qm
->vfs_num
)
431 qm
->debug
.curr_qm_qp_num
=
432 qm
->ctrl_qp_num
- qm
->qp_num
-
433 (qm
->vfs_num
- 1) * vfq_num
;
435 qm
->debug
.curr_qm_qp_num
= vfq_num
;
438 writel(val
, qm
->io_base
+ QM_DFX_MB_CNT_VF
);
439 writel(val
, qm
->io_base
+ QM_DFX_DB_CNT_VF
);
442 (readl(qm
->io_base
+ QM_DFX_SQE_CNT_VF_SQN
) & CURRENT_Q_MASK
);
443 writel(tmp
, qm
->io_base
+ QM_DFX_SQE_CNT_VF_SQN
);
446 (readl(qm
->io_base
+ QM_DFX_CQE_CNT_VF_CQN
) & CURRENT_Q_MASK
);
447 writel(tmp
, qm
->io_base
+ QM_DFX_CQE_CNT_VF_CQN
);
452 static u32
sec_clear_enable_read(struct sec_debug_file
*file
)
454 struct hisi_qm
*qm
= file
->qm
;
456 return readl(qm
->io_base
+ SEC_CTRL_CNT_CLR_CE
) &
457 SEC_CTRL_CNT_CLR_CE_BIT
;
460 static int sec_clear_enable_write(struct sec_debug_file
*file
, u32 val
)
462 struct hisi_qm
*qm
= file
->qm
;
468 tmp
= (readl(qm
->io_base
+ SEC_CTRL_CNT_CLR_CE
) &
469 ~SEC_CTRL_CNT_CLR_CE_BIT
) | val
;
470 writel(tmp
, qm
->io_base
+ SEC_CTRL_CNT_CLR_CE
);
475 static ssize_t
sec_debug_read(struct file
*filp
, char __user
*buf
,
476 size_t count
, loff_t
*pos
)
478 struct sec_debug_file
*file
= filp
->private_data
;
479 char tbuf
[SEC_DBGFS_VAL_MAX_LEN
];
483 spin_lock_irq(&file
->lock
);
485 switch (file
->index
) {
487 val
= sec_current_qm_read(file
);
489 case SEC_CLEAR_ENABLE
:
490 val
= sec_clear_enable_read(file
);
493 spin_unlock_irq(&file
->lock
);
497 spin_unlock_irq(&file
->lock
);
498 ret
= snprintf(tbuf
, SEC_DBGFS_VAL_MAX_LEN
, "%u\n", val
);
500 return simple_read_from_buffer(buf
, count
, pos
, tbuf
, ret
);
503 static ssize_t
sec_debug_write(struct file
*filp
, const char __user
*buf
,
504 size_t count
, loff_t
*pos
)
506 struct sec_debug_file
*file
= filp
->private_data
;
507 char tbuf
[SEC_DBGFS_VAL_MAX_LEN
];
514 if (count
>= SEC_DBGFS_VAL_MAX_LEN
)
517 len
= simple_write_to_buffer(tbuf
, SEC_DBGFS_VAL_MAX_LEN
- 1,
523 if (kstrtoul(tbuf
, 0, &val
))
526 spin_lock_irq(&file
->lock
);
528 switch (file
->index
) {
530 ret
= sec_current_qm_write(file
, val
);
534 case SEC_CLEAR_ENABLE
:
535 ret
= sec_clear_enable_write(file
, val
);
544 spin_unlock_irq(&file
->lock
);
549 spin_unlock_irq(&file
->lock
);
553 static const struct file_operations sec_dbg_fops
= {
554 .owner
= THIS_MODULE
,
556 .read
= sec_debug_read
,
557 .write
= sec_debug_write
,
560 static int sec_debugfs_atomic64_get(void *data
, u64
*val
)
562 *val
= atomic64_read((atomic64_t
*)data
);
567 static int sec_debugfs_atomic64_set(void *data
, u64 val
)
572 atomic64_set((atomic64_t
*)data
, 0);
577 DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops
, sec_debugfs_atomic64_get
,
578 sec_debugfs_atomic64_set
, "%lld\n");
580 static int sec_core_debug_init(struct sec_dev
*sec
)
582 struct hisi_qm
*qm
= &sec
->qm
;
583 struct device
*dev
= &qm
->pdev
->dev
;
584 struct sec_dfx
*dfx
= &sec
->debug
.dfx
;
585 struct debugfs_regset32
*regset
;
586 struct dentry
*tmp_d
;
589 tmp_d
= debugfs_create_dir("sec_dfx", sec
->qm
.debug
.debug_root
);
591 regset
= devm_kzalloc(dev
, sizeof(*regset
), GFP_KERNEL
);
595 regset
->regs
= sec_dfx_regs
;
596 regset
->nregs
= ARRAY_SIZE(sec_dfx_regs
);
597 regset
->base
= qm
->io_base
;
599 if (qm
->pdev
->device
== SEC_PF_PCI_DEVICE_ID
)
600 debugfs_create_regset32("regs", 0444, tmp_d
, regset
);
602 for (i
= 0; i
< ARRAY_SIZE(sec_dfx_labels
); i
++) {
603 atomic64_t
*data
= (atomic64_t
*)((uintptr_t)dfx
+
604 sec_dfx_labels
[i
].offset
);
605 debugfs_create_file(sec_dfx_labels
[i
].name
, 0644,
606 tmp_d
, data
, &sec_atomic64_ops
);
612 static int sec_debug_init(struct sec_dev
*sec
)
616 for (i
= SEC_CURRENT_QM
; i
< SEC_DEBUG_FILE_NUM
; i
++) {
617 spin_lock_init(&sec
->debug
.files
[i
].lock
);
618 sec
->debug
.files
[i
].index
= i
;
619 sec
->debug
.files
[i
].qm
= &sec
->qm
;
621 debugfs_create_file(sec_dbg_file_name
[i
], 0600,
622 sec
->qm
.debug
.debug_root
,
623 sec
->debug
.files
+ i
,
627 return sec_core_debug_init(sec
);
630 static int sec_debugfs_init(struct sec_dev
*sec
)
632 struct hisi_qm
*qm
= &sec
->qm
;
633 struct device
*dev
= &qm
->pdev
->dev
;
636 qm
->debug
.debug_root
= debugfs_create_dir(dev_name(dev
),
639 qm
->debug
.sqe_mask_offset
= SEC_SQE_MASK_OFFSET
;
640 qm
->debug
.sqe_mask_len
= SEC_SQE_MASK_LEN
;
641 ret
= hisi_qm_debug_init(qm
);
643 goto failed_to_create
;
645 if (qm
->pdev
->device
== SEC_PF_PCI_DEVICE_ID
) {
646 ret
= sec_debug_init(sec
);
648 goto failed_to_create
;
654 debugfs_remove_recursive(sec_debugfs_root
);
659 static void sec_debugfs_exit(struct sec_dev
*sec
)
661 debugfs_remove_recursive(sec
->qm
.debug
.debug_root
);
664 static void sec_log_hw_error(struct hisi_qm
*qm
, u32 err_sts
)
666 const struct sec_hw_error
*errs
= sec_hw_errors
;
667 struct device
*dev
= &qm
->pdev
->dev
;
671 if (errs
->int_msk
& err_sts
) {
672 dev_err(dev
, "%s [error status=0x%x] found\n",
673 errs
->msg
, errs
->int_msk
);
675 if (SEC_CORE_INT_STATUS_M_ECC
& errs
->int_msk
) {
676 err_val
= readl(qm
->io_base
+
677 SEC_CORE_SRAM_ECC_ERR_INFO
);
678 dev_err(dev
, "multi ecc sram num=0x%x\n",
679 SEC_ECC_NUM(err_val
));
680 dev_err(dev
, "multi ecc sram addr=0x%x\n",
681 SEC_ECC_ADDR(err_val
));
688 static u32
sec_get_hw_err_status(struct hisi_qm
*qm
)
690 return readl(qm
->io_base
+ SEC_CORE_INT_STATUS
);
693 static void sec_clear_hw_err_status(struct hisi_qm
*qm
, u32 err_sts
)
695 writel(err_sts
, qm
->io_base
+ SEC_CORE_INT_SOURCE
);
698 static void sec_open_axi_master_ooo(struct hisi_qm
*qm
)
702 val
= readl(SEC_ADDR(qm
, SEC_CONTROL_REG
));
703 writel(val
& SEC_AXI_SHUTDOWN_DISABLE
, SEC_ADDR(qm
, SEC_CONTROL_REG
));
704 writel(val
| SEC_AXI_SHUTDOWN_ENABLE
, SEC_ADDR(qm
, SEC_CONTROL_REG
));
707 static const struct hisi_qm_err_ini sec_err_ini
= {
708 .hw_init
= sec_set_user_domain_and_cache
,
709 .hw_err_enable
= sec_hw_error_enable
,
710 .hw_err_disable
= sec_hw_error_disable
,
711 .get_dev_hw_err_status
= sec_get_hw_err_status
,
712 .clear_dev_hw_err_status
= sec_clear_hw_err_status
,
713 .log_dev_hw_err
= sec_log_hw_error
,
714 .open_axi_master_ooo
= sec_open_axi_master_ooo
,
717 .nfe
= QM_BASE_NFE
| QM_ACC_DO_TASK_TIMEOUT
|
718 QM_ACC_WB_NOT_READY_TIMEOUT
,
720 .ecc_2bits_mask
= SEC_CORE_INT_STATUS_M_ECC
,
721 .msi_wr_port
= BIT(0),
726 static int sec_pf_probe_init(struct sec_dev
*sec
)
728 struct hisi_qm
*qm
= &sec
->qm
;
731 if (qm
->ver
== QM_HW_V1
)
732 qm
->ctrl_qp_num
= SEC_QUEUE_NUM_V1
;
734 qm
->ctrl_qp_num
= SEC_QUEUE_NUM_V2
;
736 qm
->err_ini
= &sec_err_ini
;
738 ret
= sec_set_user_domain_and_cache(qm
);
742 hisi_qm_dev_err_init(qm
);
743 sec_debug_regs_clear(qm
);
748 static int sec_qm_init(struct hisi_qm
*qm
, struct pci_dev
*pdev
)
753 qm
->ver
= pdev
->revision
;
754 qm
->sqe_size
= SEC_SQE_SIZE
;
755 qm
->dev_name
= sec_name
;
757 qm
->fun_type
= (pdev
->device
== SEC_PF_PCI_DEVICE_ID
) ?
759 if (qm
->fun_type
== QM_HW_PF
) {
760 qm
->qp_base
= SEC_PF_DEF_Q_BASE
;
761 qm
->qp_num
= pf_q_num
;
762 qm
->debug
.curr_qm_qp_num
= pf_q_num
;
763 qm
->qm_list
= &sec_devices
;
764 } else if (qm
->fun_type
== QM_HW_VF
&& qm
->ver
== QM_HW_V1
) {
766 * have no way to get qm configure in VM in v1 hardware,
767 * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
768 * to trigger only one VF in v1 hardware.
769 * v2 hardware has no such problem.
771 qm
->qp_base
= SEC_PF_DEF_Q_NUM
;
772 qm
->qp_num
= SEC_QUEUE_NUM_V1
- SEC_PF_DEF_Q_NUM
;
776 * WQ_HIGHPRI: SEC request must be low delayed,
777 * so need a high priority workqueue.
778 * WQ_UNBOUND: SEC task is likely with long
779 * running CPU intensive workloads.
781 qm
->wq
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_MEM_RECLAIM
|
782 WQ_UNBOUND
, num_online_cpus(),
785 pci_err(qm
->pdev
, "fail to alloc workqueue\n");
789 ret
= hisi_qm_init(qm
);
791 destroy_workqueue(qm
->wq
);
/*
 * Undo sec_qm_init()'s qm setup.
 * NOTE(review): the body line was lost to extraction; upstream calls
 * hisi_qm_uninit() here — confirm.
 */
static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}
801 static int sec_probe_init(struct sec_dev
*sec
)
803 struct hisi_qm
*qm
= &sec
->qm
;
806 if (qm
->fun_type
== QM_HW_PF
) {
807 ret
= sec_pf_probe_init(sec
);
815 static void sec_probe_uninit(struct hisi_qm
*qm
)
817 hisi_qm_dev_err_uninit(qm
);
819 destroy_workqueue(qm
->wq
);
822 static void sec_iommu_used_check(struct sec_dev
*sec
)
824 struct iommu_domain
*domain
;
825 struct device
*dev
= &sec
->qm
.pdev
->dev
;
827 domain
= iommu_get_domain_for_dev(dev
);
829 /* Check if iommu is used */
830 sec
->iommu_used
= false;
832 if (domain
->type
& __IOMMU_DOMAIN_PAGING
)
833 sec
->iommu_used
= true;
834 dev_info(dev
, "SMMU Opened, the iommu type = %u\n",
839 static int sec_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
845 sec
= devm_kzalloc(&pdev
->dev
, sizeof(*sec
), GFP_KERNEL
);
850 ret
= sec_qm_init(qm
, pdev
);
852 pci_err(pdev
, "Failed to init SEC QM (%d)!\n", ret
);
856 sec
->ctx_q_num
= ctx_q_num
;
857 sec_iommu_used_check(sec
);
859 ret
= sec_probe_init(sec
);
861 pci_err(pdev
, "Failed to probe!\n");
865 ret
= hisi_qm_start(qm
);
867 pci_err(pdev
, "Failed to start sec qm!\n");
868 goto err_probe_uninit
;
871 ret
= sec_debugfs_init(sec
);
873 pci_warn(pdev
, "Failed to init debugfs!\n");
875 hisi_qm_add_to_list(qm
, &sec_devices
);
877 ret
= sec_register_to_crypto();
879 pr_err("Failed to register driver to crypto.\n");
880 goto err_remove_from_list
;
883 if (qm
->fun_type
== QM_HW_PF
&& vfs_num
) {
884 ret
= hisi_qm_sriov_enable(pdev
, vfs_num
);
886 goto err_crypto_unregister
;
891 err_crypto_unregister
:
892 sec_unregister_from_crypto();
894 err_remove_from_list
:
895 hisi_qm_del_from_list(qm
, &sec_devices
);
896 sec_debugfs_exit(sec
);
900 sec_probe_uninit(qm
);
908 static void sec_remove(struct pci_dev
*pdev
)
910 struct sec_dev
*sec
= pci_get_drvdata(pdev
);
911 struct hisi_qm
*qm
= &sec
->qm
;
913 sec_unregister_from_crypto();
915 hisi_qm_del_from_list(qm
, &sec_devices
);
917 if (qm
->fun_type
== QM_HW_PF
&& qm
->vfs_num
)
918 hisi_qm_sriov_disable(pdev
);
920 sec_debugfs_exit(sec
);
922 (void)hisi_qm_stop(qm
);
924 if (qm
->fun_type
== QM_HW_PF
)
925 sec_debug_regs_clear(qm
);
927 sec_probe_uninit(qm
);
932 static const struct pci_error_handlers sec_err_handler
= {
933 .error_detected
= hisi_qm_dev_err_detected
,
934 .slot_reset
= hisi_qm_dev_slot_reset
,
935 .reset_prepare
= hisi_qm_reset_prepare
,
936 .reset_done
= hisi_qm_reset_done
,
939 static struct pci_driver sec_pci_driver
= {
941 .id_table
= sec_dev_ids
,
943 .remove
= sec_remove
,
944 .err_handler
= &sec_err_handler
,
945 .sriov_configure
= hisi_qm_sriov_configure
,
948 static void sec_register_debugfs(void)
950 if (!debugfs_initialized())
953 sec_debugfs_root
= debugfs_create_dir("hisi_sec2", NULL
);
956 static void sec_unregister_debugfs(void)
958 debugfs_remove_recursive(sec_debugfs_root
);
961 static int __init
sec_init(void)
965 hisi_qm_init_list(&sec_devices
);
966 sec_register_debugfs();
968 ret
= pci_register_driver(&sec_pci_driver
);
970 sec_unregister_debugfs();
971 pr_err("Failed to register pci driver.\n");
978 static void __exit
sec_exit(void)
980 pci_unregister_driver(&sec_pci_driver
);
981 sec_unregister_debugfs();
984 module_init(sec_init
);
985 module_exit(sec_exit
);
987 MODULE_LICENSE("GPL v2");
988 MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
989 MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
990 MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
991 MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");