// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define SEC_QUEUE_NUM_V2		1024
#define SEC_PF_PCI_DEVICE_ID		0xa255
#define SEC_VF_PCI_DEVICE_ID		0xa256

#define SEC_XTS_MIV_ENABLE_REG		0x301384
#define SEC_XTS_MIV_ENABLE_MSK		0x7FFFFFFF
#define SEC_XTS_MIV_DISABLE_MSK		0xFFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0xfffff7fd
#define SEC_BD_ERR_CHK_EN2		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		64
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		24
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_ENGINE_PF_CFG_OFF		0x300000
#define SEC_ACC_COMMON_REG_OFF		0x1000
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM(err)		(((err) >> 16) & 0xFF)
#define SEC_ECC_ADDR(err)		((err) >> 0)
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x1ff

#define SEC_RAS_CE_REG			0x50
#define SEC_RAS_FE_REG			0x54
#define SEC_RAS_NFE_REG			0x58
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x177
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x0100
#define SEC_MEM_INIT_DONE_REG		0x0104
#define SEC_QM_ABNORMAL_INT_MASK	0x100004

#define SEC_CONTROL_REG			0x0200
#define SEC_TRNG_EN_SHIFT		8
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x0220
#define SEC_INTERFACE_USER_CTRL1_REG	0x0224
#define SEC_BD_ERR_CHK_EN_REG1		0x0384
#define SEC_BD_ERR_CHK_EN_REG2		0x038c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_VF_CNT_MASK			0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN		20

#define SEC_SQE_MASK_OFFSET		64
#define SEC_SQE_MASK_LEN		48

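/* Engine registers sit in the PF engine configuration space at a fixed offset. */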
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
			     SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;

static const struct sec_hw_error sec_hw_errors[] = {
	{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
	{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
	{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
	{.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
	{.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
	{.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
	{.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
	{.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
	{.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
	{ /* sentinel */ }
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CURRENT_QM] = "current_qm",
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

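/*
 * Example (values are illustrative only): the parameters above can be set
 * at load time, e.g.
 *	modprobe hisi_sec2 pf_q_num=256 ctx_q_num=8 vfs_num=63
 * ctx_q_num must be non-zero, even, and no larger than SEC_CTX_Q_NUM_MAX;
 * vfs_num must not exceed SEC_VF_NUM.
 */
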
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

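/*
 * Probe the BD endianness configured in SEC_CONTROL_REG: bit 0 clear means
 * little endian, bit 1 clear means 32-bit big endian, otherwise 64-bit big
 * endian. VFs cannot read engine registers and fall back to little endian.
 */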
static u8 sec_get_endian(struct hisi_qm *qm)
{
	u32 reg;

	/*
	 * A VF cannot access engine registers, so reading one is not a
	 * valid way to get the endian setting there.
	 */
	if (qm->pdev->is_virtfn) {
		dev_err_ratelimited(&qm->pdev->dev,
				    "cannot access a register in VF!\n");
		return SEC_LE;
	}
	reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
			    SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);

	/* BD little endian mode */
	if (!(reg & BIT(0)))
		return SEC_LE;

	/* BD 32-bit big endian mode */
	else if (!(reg & BIT(1)))
		return SEC_32BE;

	/* BD 64-bit big endian mode */
	else
		return SEC_64BE;
}

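/*
 * One-time engine bring-up: kick the internal memory self-initialization
 * and poll for completion, enable the TRNG, set the SMMU user fields,
 * program the BD error-check masks and the BD endianness, and enable
 * multiple-IV support for SM4-XTS. Clock gating is held off for the
 * duration of the sequence.
 */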
static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control */
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));

	ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(&qm->pdev->dev, "fail to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
	reg |= SEC_USER0_SMMU_NORMAL;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));

	reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
	reg |= SEC_USER1_SMMU_NORMAL;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));

	writel_relaxed(SEC_BD_ERR_CHK_EN1,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
	writel_relaxed(SEC_BD_ERR_CHK_EN2,
		       SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2));

	/* enable clock gate control */
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	/* config endian */
	reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
	reg |= sec_get_endian(qm);
	writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));

	/* enable SM4 XTS mode multiple IV */
	writel_relaxed(SEC_XTS_MIV_ENABLE_MSK,
		       qm->io_base + SEC_XTS_MIV_ENABLE_REG);

	return 0;
}

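/*
 * Configure the QM side of the device (AXI user domain, cache behaviour,
 * SQC/CQC write-back), then initialize the SEC engine itself.
 */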
static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable SQC, CQC writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	/* clear current_qm */
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

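/*
 * Unmask the SEC core interrupts and enable the CE/FE/NFE RAS reporting
 * paths. QM_HW_V1 has no RAS support, so the core interrupts are simply
 * masked there.
 */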
static void sec_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		dev_info(&qm->pdev->dev, "V1 not support hw error handle\n");
		return;
	}

	val = readl(qm->io_base + SEC_CONTROL_REG);

	/* clear SEC hw error source, if any */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when an m-bit error occurs */
	val = val | SEC_AXI_SHUTDOWN_ENABLE;

	writel(val, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);

	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when an m-bit error occurs */
	val = val & SEC_AXI_SHUTDOWN_DISABLE;

	writel(val, qm->io_base + SEC_CONTROL_REG);
}

static u32 sec_current_qm_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

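/*
 * Select which function's counters the QM DFX registers expose:
 * 0 selects the PF, 1..vfs_num selects a VF. Queues left over after
 * dividing them evenly go to the last VF.
 */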
static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 vfq_num;
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

	/* calculate curr_qm_qp_num from the PF/VF device ID and store it */
	if (!val) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;

		if (val == qm->vfs_num)
			qm->debug.curr_qm_qp_num =
				qm->ctrl_qp_num - qm->qp_num -
				(qm->vfs_num - 1) * vfq_num;
		else
			qm->debug.curr_qm_qp_num = vfq_num;
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

451
452 static u32 sec_clear_enable_read(struct sec_debug_file *file)
453 {
454 struct hisi_qm *qm = file->qm;
455
456 return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
457 SEC_CTRL_CNT_CLR_CE_BIT;
458 }
459
460 static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
461 {
462 struct hisi_qm *qm = file->qm;
463 u32 tmp;
464
465 if (val != 1 && val)
466 return -EINVAL;
467
468 tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
469 ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
470 writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
471
472 return 0;
473 }
474
475 static ssize_t sec_debug_read(struct file *filp, char __user *buf,
476 size_t count, loff_t *pos)
477 {
478 struct sec_debug_file *file = filp->private_data;
479 char tbuf[SEC_DBGFS_VAL_MAX_LEN];
480 u32 val;
481 int ret;
482
483 spin_lock_irq(&file->lock);
484
485 switch (file->index) {
486 case SEC_CURRENT_QM:
487 val = sec_current_qm_read(file);
488 break;
489 case SEC_CLEAR_ENABLE:
490 val = sec_clear_enable_read(file);
491 break;
492 default:
493 spin_unlock_irq(&file->lock);
494 return -EINVAL;
495 }
496
497 spin_unlock_irq(&file->lock);
498 ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
499
500 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
501 }
502
503 static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
504 size_t count, loff_t *pos)
505 {
506 struct sec_debug_file *file = filp->private_data;
507 char tbuf[SEC_DBGFS_VAL_MAX_LEN];
508 unsigned long val;
509 int len, ret;
510
511 if (*pos != 0)
512 return 0;
513
514 if (count >= SEC_DBGFS_VAL_MAX_LEN)
515 return -ENOSPC;
516
517 len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
518 pos, buf, count);
519 if (len < 0)
520 return len;
521
522 tbuf[len] = '\0';
523 if (kstrtoul(tbuf, 0, &val))
524 return -EFAULT;
525
526 spin_lock_irq(&file->lock);
527
528 switch (file->index) {
529 case SEC_CURRENT_QM:
530 ret = sec_current_qm_write(file, val);
531 if (ret)
532 goto err_input;
533 break;
534 case SEC_CLEAR_ENABLE:
535 ret = sec_clear_enable_write(file, val);
536 if (ret)
537 goto err_input;
538 break;
539 default:
540 ret = -EINVAL;
541 goto err_input;
542 }
543
544 spin_unlock_irq(&file->lock);
545
546 return count;
547
548 err_input:
549 spin_unlock_irq(&file->lock);
550 return ret;
551 }
552
553 static const struct file_operations sec_dbg_fops = {
554 .owner = THIS_MODULE,
555 .open = simple_open,
556 .read = sec_debug_read,
557 .write = sec_debug_write,
558 };
559
560 static int sec_debugfs_atomic64_get(void *data, u64 *val)
561 {
562 *val = atomic64_read((atomic64_t *)data);
563
564 return 0;
565 }
566
567 static int sec_debugfs_atomic64_set(void *data, u64 val)
568 {
569 if (val)
570 return -EINVAL;
571
572 atomic64_set((atomic64_t *)data, 0);
573
574 return 0;
575 }
576
577 DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
578 sec_debugfs_atomic64_set, "%lld\n");
579
static int sec_core_debug_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
		debugfs_create_regset32("regs", 0444, tmp_d, regset);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
						  sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct sec_dev *sec)
{
	int i;

	for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
		spin_lock_init(&sec->debug.files[i].lock);
		sec->debug.files[i].index = i;
		sec->debug.files[i].qm = &sec->qm;

		debugfs_create_file(sec_dbg_file_name[i], 0600,
				    sec->qm.debug.debug_root,
				    sec->debug.files + i,
				    &sec_dbg_fops);
	}

	return sec_core_debug_init(sec);
}

static int sec_debugfs_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);

	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
	ret = hisi_qm_debug_init(qm);
	if (ret)
		goto failed_to_create;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
		ret = sec_debug_init(sec);
		if (ret)
			goto failed_to_create;
	}

	return 0;

failed_to_create:
	debugfs_remove_recursive(sec_debugfs_root);

	return ret;
}

static void sec_debugfs_exit(struct sec_dev *sec)
{
	debugfs_remove_recursive(sec->qm.debug.debug_root);
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					SEC_ECC_NUM(err_val));
				dev_err(dev, "multi ecc sram addr=0x%x\n",
					SEC_ECC_ADDR(err_val));
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init = sec_set_user_domain_and_cache,
	.hw_err_enable = sec_hw_error_enable,
	.hw_err_disable = sec_hw_error_disable,
	.get_dev_hw_err_status = sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err = sec_log_hw_error,
	.open_axi_master_ooo = sec_open_axi_master_ooo,
	.err_info = {
		.ce = QM_BASE_CE,
		.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
		       QM_ACC_WB_NOT_READY_TIMEOUT,
		.fe = 0,
		.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
		.msi_wr_port = BIT(0),
		.acpi_rst = "SRST",
	}
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->ver == QM_HW_V1)
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
	else
		qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;

	qm->err_ini = &sec_err_ini;

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);

	return 0;
}

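/*
 * Fill in the QM description from the PCI device: the silicon revision
 * selects the QM hardware version, and the device ID distinguishes PF
 * from VF. A dedicated workqueue handles request completion.
 */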
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * A VF has no way to query the QM configuration on v1
		 * hardware, so force the PF to use SEC_PF_DEF_Q_NUM
		 * queues and support only a single VF there.
		 * v2 hardware has no such limitation.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	/*
	 * WQ_HIGHPRI: SEC requests are latency sensitive, so they need
	 * a high-priority workqueue.
	 * WQ_UNBOUND: SEC tasks are likely to be long-running,
	 * CPU-intensive workloads.
	 */
	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);

	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);

	destroy_workqueue(qm->wq);
}

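/*
 * Record whether DMA is translated by the SMMU: a paging domain attached
 * to the device means the IOMMU is active, and the result is stored in
 * sec->iommu_used for later use by the request path.
 */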
static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			 domain->type);
	}
}

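/*
 * Probe order: QM init, PF-specific init, start the QM, set up debugfs
 * (failure is not fatal), register the crypto algorithms, and finally
 * enable SR-IOV if requested. The error path unwinds in reverse.
 */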
static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(sec);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	hisi_qm_add_to_list(qm, &sec_devices);

	ret = sec_register_to_crypto();
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_remove_from_list;
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_crypto_unregister;
	}

	return 0;

err_crypto_unregister:
	sec_unregister_from_crypto();

err_remove_from_list:
	hisi_qm_del_from_list(qm, &sec_devices);
	sec_debugfs_exit(sec);
	hisi_qm_stop(qm);

err_probe_uninit:
	sec_probe_uninit(qm);

err_qm_uninit:
	sec_qm_uninit(qm);

	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct sec_dev *sec = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &sec->qm;

	sec_unregister_from_crypto();

	hisi_qm_del_from_list(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev);

	sec_debugfs_exit(sec);

	(void)hisi_qm_stop(qm);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
};

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");