1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2019 HiSilicon Limited. */
3#include <asm/page.h>
4#include <linux/acpi.h>
5#include <linux/aer.h>
6#include <linux/bitmap.h>
7#include <linux/debugfs.h>
8#include <linux/dma-mapping.h>
9#include <linux/idr.h>
10#include <linux/io.h>
11#include <linux/irqreturn.h>
12#include <linux/log2.h>
13#include <linux/seq_file.h>
14#include <linux/slab.h>
15#include <linux/uacce.h>
16#include <linux/uaccess.h>
17#include <uapi/misc/uacce/hisi_qm.h>
18#include "qm.h"
19
20/* eq/aeq irq enable */
21#define QM_VF_AEQ_INT_SOURCE 0x0
22#define QM_VF_AEQ_INT_MASK 0x4
23#define QM_VF_EQ_INT_SOURCE 0x8
24#define QM_VF_EQ_INT_MASK 0xc
25#define QM_IRQ_NUM_V1 1
26#define QM_IRQ_NUM_PF_V2 4
27#define QM_IRQ_NUM_VF_V2 2
28
29#define QM_EQ_EVENT_IRQ_VECTOR 0
30#define QM_AEQ_EVENT_IRQ_VECTOR 1
31#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
32
33/* mailbox */
34#define QM_MB_CMD_SQC 0x0
35#define QM_MB_CMD_CQC 0x1
36#define QM_MB_CMD_EQC 0x2
37#define QM_MB_CMD_AEQC 0x3
38#define QM_MB_CMD_SQC_BT 0x4
39#define QM_MB_CMD_CQC_BT 0x5
40#define QM_MB_CMD_SQC_VFT_V2 0x6
41
42#define QM_MB_CMD_SEND_BASE 0x300
43#define QM_MB_EVENT_SHIFT 8
44#define QM_MB_BUSY_SHIFT 13
45#define QM_MB_OP_SHIFT 14
46#define QM_MB_CMD_DATA_ADDR_L 0x304
47#define QM_MB_CMD_DATA_ADDR_H 0x308
48
49/* sqc shift */
50#define QM_SQ_HOP_NUM_SHIFT 0
51#define QM_SQ_PAGE_SIZE_SHIFT 4
52#define QM_SQ_BUF_SIZE_SHIFT 8
53#define QM_SQ_SQE_SIZE_SHIFT 12
54#define QM_SQ_PRIORITY_SHIFT 0
55#define QM_SQ_ORDERS_SHIFT 4
56#define QM_SQ_TYPE_SHIFT 8
57
58#define QM_SQ_TYPE_MASK GENMASK(3, 0)
59#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
60
61/* cqc shift */
62#define QM_CQ_HOP_NUM_SHIFT 0
63#define QM_CQ_PAGE_SIZE_SHIFT 4
64#define QM_CQ_BUF_SIZE_SHIFT 8
65#define QM_CQ_CQE_SIZE_SHIFT 12
66#define QM_CQ_PHASE_SHIFT 0
67#define QM_CQ_FLAG_SHIFT 1
68
69#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
70#define QM_QC_CQE_SIZE 4
71#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
72
73/* eqc shift */
74#define QM_EQE_AEQE_SIZE (2UL << 12)
75#define QM_EQC_PHASE_SHIFT 16
76
77#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
78#define QM_EQE_CQN_MASK GENMASK(15, 0)
79
80#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
81#define QM_AEQE_TYPE_SHIFT 17
82
83#define QM_DOORBELL_CMD_SQ 0
84#define QM_DOORBELL_CMD_CQ 1
85#define QM_DOORBELL_CMD_EQ 2
86#define QM_DOORBELL_CMD_AEQ 3
87
88#define QM_DOORBELL_BASE_V1 0x340
89#define QM_DB_CMD_SHIFT_V1 16
90#define QM_DB_INDEX_SHIFT_V1 32
91#define QM_DB_PRIORITY_SHIFT_V1 48
92#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
93#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
94#define QM_DB_CMD_SHIFT_V2 12
95#define QM_DB_RAND_SHIFT_V2 16
96#define QM_DB_INDEX_SHIFT_V2 32
97#define QM_DB_PRIORITY_SHIFT_V2 48
98
99#define QM_MEM_START_INIT 0x100040
100#define QM_MEM_INIT_DONE 0x100044
101#define QM_VFT_CFG_RDY 0x10006c
102#define QM_VFT_CFG_OP_WR 0x100058
103#define QM_VFT_CFG_TYPE 0x10005c
104#define QM_SQC_VFT 0x0
105#define QM_CQC_VFT 0x1
106#define QM_VFT_CFG 0x100060
107#define QM_VFT_CFG_OP_ENABLE 0x100054
108
109#define QM_VFT_CFG_DATA_L 0x100064
110#define QM_VFT_CFG_DATA_H 0x100068
111#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
112#define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
113#define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
114#define QM_SQC_VFT_START_SQN_SHIFT 28
115#define QM_SQC_VFT_VALID (1ULL << 44)
116#define QM_SQC_VFT_SQN_SHIFT 45
117#define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
118#define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
119#define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
120#define QM_CQC_VFT_VALID (1ULL << 28)
121
122#define QM_SQC_VFT_BASE_SHIFT_V2 28
123#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0)
124#define QM_SQC_VFT_NUM_SHIFT_V2 45
125#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
126
127#define QM_DFX_CNT_CLR_CE 0x100118
128
129#define QM_ABNORMAL_INT_SOURCE 0x100000
130#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
131#define QM_ABNORMAL_INT_MASK 0x100004
132#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
133#define QM_ABNORMAL_INT_STATUS 0x100008
134#define QM_ABNORMAL_INT_SET 0x10000c
135#define QM_ABNORMAL_INF00 0x100010
136#define QM_FIFO_OVERFLOW_TYPE 0xc0
137#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
138#define QM_FIFO_OVERFLOW_VF 0x3f
139#define QM_ABNORMAL_INF01 0x100014
140#define QM_DB_TIMEOUT_TYPE 0xc0
141#define QM_DB_TIMEOUT_TYPE_SHIFT 6
142#define QM_DB_TIMEOUT_VF 0x3f
143#define QM_RAS_CE_ENABLE 0x1000ec
144#define QM_RAS_FE_ENABLE 0x1000f0
145#define QM_RAS_NFE_ENABLE 0x1000f4
146#define QM_RAS_CE_THRESHOLD 0x1000f8
147#define QM_RAS_CE_TIMES_PER_IRQ 1
148#define QM_RAS_MSI_INT_SEL 0x1040f4
149
150#define QM_DEV_RESET_FLAG 0
151#define QM_RESET_WAIT_TIMEOUT 400
152#define QM_PEH_VENDOR_ID 0x1000d8
153#define ACC_VENDOR_ID_VALUE 0x5a5a
154#define QM_PEH_DFX_INFO0 0x1000fc
155#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
156#define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
157#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
158#define ACC_MASTER_TRANS_RETURN_RW 3
159#define ACC_MASTER_TRANS_RETURN 0x300150
160#define ACC_MASTER_GLOBAL_CTRL 0x300000
161#define ACC_AM_CFG_PORT_WR_EN 0x30001c
162#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
163#define ACC_AM_ROB_ECC_INT_STS 0x300104
164#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
165
166#define POLL_PERIOD 10
167#define POLL_TIMEOUT 1000
168#define WAIT_PERIOD_US_MAX 200
169#define WAIT_PERIOD_US_MIN 100
170#define MAX_WAIT_COUNTS 1000
171#define QM_CACHE_WB_START 0x204
172#define QM_CACHE_WB_DONE 0x208
173
174#define PCI_BAR_2 2
175#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
176#define QMC_ALIGN(sz) ALIGN(sz, 32)
177
178#define QM_DBG_READ_LEN 256
179#define QM_DBG_WRITE_LEN 1024
180#define QM_DBG_TMP_BUF_LEN 22
181#define QM_PCI_COMMAND_INVALID ~0
182
183#define QM_SQE_ADDR_MASK GENMASK(7, 0)
184
185#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
186 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
187 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
188 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
189 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
190
191#define QM_MK_CQC_DW3_V2(cqe_sz) \
192 ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
193
194#define QM_MK_SQC_W13(priority, orders, alg_type) \
195 (((priority) << QM_SQ_PRIORITY_SHIFT) | \
196 ((orders) << QM_SQ_ORDERS_SHIFT) | \
197 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
198
199#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
200 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
201 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
202 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
203 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
204
205#define QM_MK_SQC_DW3_V2(sqe_sz) \
206 ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
207
208#define INIT_QC_COMMON(qc, base, pasid) do { \
209 (qc)->head = 0; \
210 (qc)->tail = 0; \
211 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
212 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
213 (qc)->dw3 = 0; \
214 (qc)->w8 = 0; \
215 (qc)->rsvd0 = 0; \
216 (qc)->pasid = cpu_to_le16(pasid); \
217 (qc)->w11 = 0; \
218 (qc)->rsvd1 = 0; \
219} while (0)
220
221enum vft_type {
222 SQC_VFT = 0,
223 CQC_VFT,
224};
225
226enum acc_err_result {
227 ACC_ERR_NONE,
228 ACC_ERR_NEED_RESET,
229 ACC_ERR_RECOVERED,
230};
231
232struct qm_cqe {
233 __le32 rsvd0;
234 __le16 cmd_id;
235 __le16 rsvd1;
236 __le16 sq_head;
237 __le16 sq_num;
238 __le16 rsvd2;
239 __le16 w7;
240};
241
242struct qm_eqe {
243 __le32 dw0;
244};
245
246struct qm_aeqe {
247 __le32 dw0;
248};
249
250struct qm_sqc {
251 __le16 head;
252 __le16 tail;
253 __le32 base_l;
254 __le32 base_h;
255 __le32 dw3;
256 __le16 w8;
257 __le16 rsvd0;
258 __le16 pasid;
259 __le16 w11;
260 __le16 cq_num;
261 __le16 w13;
262 __le32 rsvd1;
263};
264
265struct qm_cqc {
266 __le16 head;
267 __le16 tail;
268 __le32 base_l;
269 __le32 base_h;
270 __le32 dw3;
271 __le16 w8;
272 __le16 rsvd0;
273 __le16 pasid;
274 __le16 w11;
275 __le32 dw6;
276 __le32 rsvd1;
277};
278
279struct qm_eqc {
280 __le16 head;
281 __le16 tail;
282 __le32 base_l;
283 __le32 base_h;
284 __le32 dw3;
285 __le32 rsvd[2];
286 __le32 dw6;
287};
288
289struct qm_aeqc {
290 __le16 head;
291 __le16 tail;
292 __le32 base_l;
293 __le32 base_h;
294 __le32 dw3;
295 __le32 rsvd[2];
296 __le32 dw6;
297};
298
299struct qm_mailbox {
300 __le16 w0;
301 __le16 queue_num;
302 __le32 base_l;
303 __le32 base_h;
304 __le32 rsvd;
305};
306
307struct qm_doorbell {
308 __le16 queue_num;
309 __le16 cmd;
310 __le16 index;
311 __le16 priority;
312};
313
314struct hisi_qm_resource {
315 struct hisi_qm *qm;
316 int distance;
317 struct list_head list;
318};
319
320struct hisi_qm_hw_ops {
321 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
322 void (*qm_db)(struct hisi_qm *qm, u16 qn,
323 u8 cmd, u16 index, u8 priority);
324 u32 (*get_irq_num)(struct hisi_qm *qm);
325 int (*debug_init)(struct hisi_qm *qm);
326 void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
327 void (*hw_error_uninit)(struct hisi_qm *qm);
328 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
329};
330
331struct qm_dfx_item {
332 const char *name;
333 u32 offset;
334};
335
336static struct qm_dfx_item qm_dfx_files[] = {
337 {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
338 {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
339 {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
340 {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
341 {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
342};
343
344static const char * const qm_debug_file_name[] = {
345 [CURRENT_Q] = "current_q",
346 [CLEAR_ENABLE] = "clear_enable",
347};
348
349struct hisi_qm_hw_error {
350 u32 int_msk;
351 const char *msg;
352};
353
354static const struct hisi_qm_hw_error qm_hw_error[] = {
355 { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
356 { .int_msk = BIT(1), .msg = "qm_axi_bresp" },
357 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
358 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
359 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
360 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
361 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
362 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
363 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
364 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
365 { .int_msk = BIT(10), .msg = "qm_db_timeout" },
366 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
367 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
368 { /* sentinel */ }
369};
370
371static const char * const qm_db_timeout[] = {
372 "sq", "cq", "eq", "aeq",
373};
374
375static const char * const qm_fifo_overflow[] = {
376 "cq", "eq", "aeq",
377};
378
379static const char * const qm_s[] = {
380 "init", "start", "close", "stop",
381};
382
383static const char * const qp_s[] = {
384 "none", "init", "start", "stop", "close",
385};
386
387static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
388{
389 enum qm_state curr = atomic_read(&qm->status.flags);
390 bool avail = false;
391
392 switch (curr) {
393 case QM_INIT:
394 if (new == QM_START || new == QM_CLOSE)
395 avail = true;
396 break;
397 case QM_START:
398 if (new == QM_STOP)
399 avail = true;
400 break;
401 case QM_STOP:
402 if (new == QM_CLOSE || new == QM_START)
403 avail = true;
404 break;
405 default:
406 break;
407 }
408
409 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
410 qm_s[curr], qm_s[new]);
411
412 if (!avail)
413 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
414 qm_s[curr], qm_s[new]);
415
416 return avail;
417}
418
419static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
420 enum qp_state new)
421{
422 enum qm_state qm_curr = atomic_read(&qm->status.flags);
423 enum qp_state qp_curr = 0;
424 bool avail = false;
425
426 if (qp)
427 qp_curr = atomic_read(&qp->qp_status.flags);
428
429 switch (new) {
430 case QP_INIT:
431 if (qm_curr == QM_START || qm_curr == QM_INIT)
432 avail = true;
433 break;
434 case QP_START:
435 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
436 (qm_curr == QM_START && qp_curr == QP_STOP))
437 avail = true;
438 break;
439 case QP_STOP:
440 if ((qm_curr == QM_START && qp_curr == QP_START) ||
441 (qp_curr == QP_INIT))
442 avail = true;
443 break;
444 case QP_CLOSE:
445 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
446 (qm_curr == QM_START && qp_curr == QP_STOP) ||
447 (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
448 (qm_curr == QM_STOP && qp_curr == QP_INIT))
449 avail = true;
450 break;
451 default:
452 break;
453 }
454
455 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
456 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
457
458 if (!avail)
459 dev_warn(&qm->pdev->dev,
460 "Can not change qp state from %s to %s in QM %s\n",
461 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
462
463 return avail;
464}
465
466/* return 0 if the mailbox is ready, -ETIMEDOUT on hardware timeout */
467static int qm_wait_mb_ready(struct hisi_qm *qm)
468{
469 u32 val;
470
471 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
472 val, !((val >> QM_MB_BUSY_SHIFT) &
473 0x1), 10, 1000);
474}
475
476/* all 128 bits must be written to the hardware at one time to trigger a mailbox */
477static void qm_mb_write(struct hisi_qm *qm, const void *src)
478{
479 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
480 unsigned long tmp0 = 0, tmp1 = 0;
481
482 if (!IS_ENABLED(CONFIG_ARM64)) {
483 memcpy_toio(fun_base, src, 16);
484 wmb();
485 return;
486 }
487
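	/*
	 * On arm64, ldp/stp moves all 16 bytes of the mailbox with a single
	 * load/store pair, so the busy flag and the rest of the command reach
	 * the hardware together; dsb sy orders the store before we go on to
	 * poll for completion.
	 */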
488 asm volatile("ldp %0, %1, %3\n"
489 "stp %0, %1, %2\n"
490 "dsb sy\n"
491 : "=&r" (tmp0),
492 "=&r" (tmp1),
493 "+Q" (*((char __iomem *)fun_base))
494 : "Q" (*((char *)src))
495 : "memory");
496}
497
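/*
 * One mailbox transaction: wait for the hardware to clear the busy flag,
 * write the 16-byte command in a single shot, then wait for busy to clear
 * again to confirm the command has been accepted.
 */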
498static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
499 bool op)
500{
501 struct qm_mailbox mailbox;
502 int ret = 0;
503
504 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
505 queue, cmd, (unsigned long long)dma_addr);
506
507 mailbox.w0 = cpu_to_le16(cmd |
508 (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
509 (0x1 << QM_MB_BUSY_SHIFT));
510 mailbox.queue_num = cpu_to_le16(queue);
511 mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
512 mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
513 mailbox.rsvd = 0;
514
515 mutex_lock(&qm->mailbox_lock);
516
517 if (unlikely(qm_wait_mb_ready(qm))) {
518 ret = -EBUSY;
519 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
520 goto busy_unlock;
521 }
522
523 qm_mb_write(qm, &mailbox);
524
525 if (unlikely(qm_wait_mb_ready(qm))) {
526 ret = -EBUSY;
527 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
528 goto busy_unlock;
529 }
530
531busy_unlock:
532 mutex_unlock(&qm->mailbox_lock);
533
534 if (ret)
535 atomic64_inc(&qm->debug.dfx.mb_err_cnt);
536 return ret;
537}
538
539static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
540{
541 u64 doorbell;
542
543 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
544 ((u64)index << QM_DB_INDEX_SHIFT_V1) |
545 ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
546
547 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
548}
549
550static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
551{
552 u64 doorbell;
553 u64 dbase;
554 u16 randata = 0;
555
556 if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
557 dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
558 else
559 dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
560
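	/*
	 * V2 hardware uses separate doorbell regions for SQ/CQ and EQ/AEQ;
	 * the 64-bit doorbell word packs queue number, command, random data,
	 * index and priority using the *_SHIFT_V2 values defined above.
	 */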
561 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
562 ((u64)randata << QM_DB_RAND_SHIFT_V2) |
563 ((u64)index << QM_DB_INDEX_SHIFT_V2) |
564 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
565
566 writeq(doorbell, qm->io_base + dbase);
567}
568
569static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
570{
571 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
572 qn, cmd, index);
573
574 qm->ops->qm_db(qm, qn, cmd, index, priority);
575}
576
577static int qm_dev_mem_reset(struct hisi_qm *qm)
578{
579 u32 val;
580
581 writel(0x1, qm->io_base + QM_MEM_START_INIT);
582 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
583 val & BIT(0), 10, 1000);
584}
585
586static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
587{
588 return QM_IRQ_NUM_V1;
589}
590
591static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
592{
593 if (qm->fun_type == QM_HW_PF)
594 return QM_IRQ_NUM_PF_V2;
595 else
596 return QM_IRQ_NUM_VF_V2;
597}
598
599static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
600{
601 u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
602
603 return &qm->qp_array[cqn];
604}
605
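/*
 * Advance the CQ head; when it wraps, flip cqc_phase so that CQEs left over
 * from the previous pass no longer match and are not processed again.
 */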
606static void qm_cq_head_update(struct hisi_qp *qp)
607{
608 if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
609 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
610 qp->qp_status.cq_head = 0;
611 } else {
612 qp->qp_status.cq_head++;
613 }
614}
615
616static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
617{
618 if (qp->event_cb) {
619 qp->event_cb(qp);
620 return;
621 }
622
623 if (qp->req_cb) {
624 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
625
626 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
627 dma_rmb();
628 qp->req_cb(qp, qp->sqe + qm->sqe_size *
629 le16_to_cpu(cqe->sq_head));
630 qm_cq_head_update(qp);
631 cqe = qp->cqe + qp->qp_status.cq_head;
632 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
633 qp->qp_status.cq_head, 0);
634 atomic_dec(&qp->qp_status.used);
635 }
636
637 /* set c_flag */
638 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
639 qp->qp_status.cq_head, 1);
640 }
641}
642
643static void qm_work_process(struct work_struct *work)
644{
645 struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
646 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
647 struct hisi_qp *qp;
648 int eqe_num = 0;
649
650 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
651 eqe_num++;
652 qp = qm_to_hisi_qp(qm, eqe);
653 qm_poll_qp(qp, qm);
654
655 if (qm->status.eq_head == QM_Q_DEPTH - 1) {
656 qm->status.eqc_phase = !qm->status.eqc_phase;
657 eqe = qm->eqe;
658 qm->status.eq_head = 0;
659 } else {
660 eqe++;
661 qm->status.eq_head++;
662 }
663
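		/*
		 * Ring the EQ doorbell part-way through a long burst so the
		 * hardware can reuse the EQEs that have already been consumed.
		 */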
664 if (eqe_num == QM_Q_DEPTH / 2 - 1) {
665 eqe_num = 0;
666 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
667 }
668 }
669
670 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
671}
672
673static irqreturn_t do_qm_irq(int irq, void *data)
674{
675 struct hisi_qm *qm = (struct hisi_qm *)data;
676
677 /* use the workqueue created by the QM's device driver, if any */
678 if (qm->wq)
679 queue_work(qm->wq, &qm->work);
680 else
681 schedule_work(&qm->work);
682
683 return IRQ_HANDLED;
684}
685
686static irqreturn_t qm_irq(int irq, void *data)
687{
688 struct hisi_qm *qm = data;
689
690 if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
691 return do_qm_irq(irq, data);
692
693 atomic64_inc(&qm->debug.dfx.err_irq_cnt);
694 dev_err(&qm->pdev->dev, "invalid int source\n");
695 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
696
697 return IRQ_NONE;
698}
699
700static irqreturn_t qm_aeq_irq(int irq, void *data)
701{
702 struct hisi_qm *qm = data;
703 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
704 u32 type;
705
706 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
707 if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
708 return IRQ_NONE;
709
710 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
711 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
712 if (type < ARRAY_SIZE(qm_fifo_overflow))
713 dev_err(&qm->pdev->dev, "%s overflow\n",
714 qm_fifo_overflow[type]);
715 else
716 dev_err(&qm->pdev->dev, "unknown error type %d\n",
717 type);
718
719 if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
720 qm->status.aeqc_phase = !qm->status.aeqc_phase;
721 aeqe = qm->aeqe;
722 qm->status.aeq_head = 0;
723 } else {
724 aeqe++;
725 qm->status.aeq_head++;
726 }
727
728 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
729 }
730
731 return IRQ_HANDLED;
732}
733
734static void qm_irq_unregister(struct hisi_qm *qm)
735{
736 struct pci_dev *pdev = qm->pdev;
737
738 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
739
740 if (qm->ver == QM_HW_V1)
741 return;
742
743 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
744
745 if (qm->fun_type == QM_HW_PF)
746 free_irq(pci_irq_vector(pdev,
747 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
748}
749
750static void qm_init_qp_status(struct hisi_qp *qp)
751{
752 struct hisi_qp_status *qp_status = &qp->qp_status;
753
754 qp_status->sq_tail = 0;
755 qp_status->cq_head = 0;
756 qp_status->cqc_phase = true;
757 atomic_set(&qp_status->flags, 0);
758}
759
760static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
761 u32 number)
762{
763 u64 tmp = 0;
764
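	/*
	 * Build the SQC/CQC VFT (virtual function table) data word that
	 * assigns "number" queues starting at "base"; V1 and V2 hardware use
	 * different encodings (see the QM_SQC_VFT_*/QM_CQC_VFT_* fields above).
	 */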
765 if (number > 0) {
766 switch (type) {
767 case SQC_VFT:
768 if (qm->ver == QM_HW_V1) {
769 tmp = QM_SQC_VFT_BUF_SIZE |
770 QM_SQC_VFT_SQC_SIZE |
771 QM_SQC_VFT_INDEX_NUMBER |
772 QM_SQC_VFT_VALID |
773 (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
774 } else {
775 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
776 QM_SQC_VFT_VALID |
777 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
778 }
779 break;
780 case CQC_VFT:
781 if (qm->ver == QM_HW_V1) {
782 tmp = QM_CQC_VFT_BUF_SIZE |
783 QM_CQC_VFT_SQC_SIZE |
784 QM_CQC_VFT_INDEX_NUMBER |
785 QM_CQC_VFT_VALID;
786 } else {
787 tmp = QM_CQC_VFT_VALID;
788 }
789 break;
790 }
791 }
792
793 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
794 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
795}
796
797static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
798 u32 fun_num, u32 base, u32 number)
799{
800 unsigned int val;
801 int ret;
802
803 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
804 val & BIT(0), 10, 1000);
805 if (ret)
806 return ret;
807
808 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
809 writel(type, qm->io_base + QM_VFT_CFG_TYPE);
810 writel(fun_num, qm->io_base + QM_VFT_CFG);
811
812 qm_vft_data_cfg(qm, type, base, number);
813
814 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
815 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
816
817 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
818 val & BIT(0), 10, 1000);
819}
820
821/* The config should be conducted after qm_dev_mem_reset() */
822static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
823 u32 number)
824{
825 int ret, i;
826
827 for (i = SQC_VFT; i <= CQC_VFT; i++) {
828 ret = qm_set_vft_common(qm, i, fun_num, base, number);
829 if (ret)
830 return ret;
831 }
832
833 return 0;
834}
835
836static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
837{
838 u64 sqc_vft;
839 int ret;
840
841 ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
842 if (ret)
843 return ret;
844
845 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
846 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
847 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
848 *number = (QM_SQC_VFT_NUM_MASK_v2 &
849 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
850
851 return 0;
852}
853
854static struct hisi_qm *file_to_qm(struct debugfs_file *file)
855{
856 struct qm_debug *debug = file->debug;
857
858 return container_of(debug, struct hisi_qm, debug);
859}
860
861static u32 current_q_read(struct debugfs_file *file)
862{
863 struct hisi_qm *qm = file_to_qm(file);
864
865 return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
866}
867
868static int current_q_write(struct debugfs_file *file, u32 val)
869{
870 struct hisi_qm *qm = file_to_qm(file);
871 u32 tmp;
872
873 if (val >= qm->debug.curr_qm_qp_num)
874 return -EINVAL;
875
876 tmp = val << QM_DFX_QN_SHIFT |
877 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
878 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
879
880 tmp = val << QM_DFX_QN_SHIFT |
881 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
882 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
883
884 return 0;
885}
886
887static u32 clear_enable_read(struct debugfs_file *file)
888{
889 struct hisi_qm *qm = file_to_qm(file);
890
891 return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
892}
893
894/* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
895static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
896{
897 struct hisi_qm *qm = file_to_qm(file);
898
899 if (rd_clr_ctrl > 1)
900 return -EINVAL;
901
902 writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
903
904 return 0;
905}
906
907static ssize_t qm_debug_read(struct file *filp, char __user *buf,
908 size_t count, loff_t *pos)
909{
910 struct debugfs_file *file = filp->private_data;
911 enum qm_debug_file index = file->index;
912 char tbuf[QM_DBG_TMP_BUF_LEN];
913 u32 val;
914 int ret;
915
916 mutex_lock(&file->lock);
917 switch (index) {
918 case CURRENT_Q:
919 val = current_q_read(file);
920 break;
921 case CLEAR_ENABLE:
922 val = clear_enable_read(file);
923 break;
924 default:
925 mutex_unlock(&file->lock);
926 return -EINVAL;
927 }
928 mutex_unlock(&file->lock);
929 ret = sprintf(tbuf, "%u\n", val);
930 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
931}
932
933static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
934 size_t count, loff_t *pos)
935{
936 struct debugfs_file *file = filp->private_data;
937 enum qm_debug_file index = file->index;
938 unsigned long val;
939 char tbuf[QM_DBG_TMP_BUF_LEN];
940 int len, ret;
941
942 if (*pos != 0)
943 return 0;
944
945 if (count >= QM_DBG_TMP_BUF_LEN)
946 return -ENOSPC;
947
948 len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
949 count);
950 if (len < 0)
951 return len;
952
953 tbuf[len] = '\0';
954 if (kstrtoul(tbuf, 0, &val))
955 return -EFAULT;
956
957 mutex_lock(&file->lock);
958 switch (index) {
959 case CURRENT_Q:
960 ret = current_q_write(file, val);
961 if (ret)
962 goto err_input;
963 break;
964 case CLEAR_ENABLE:
965 ret = clear_enable_write(file, val);
966 if (ret)
967 goto err_input;
968 break;
969 default:
970 ret = -EINVAL;
971 goto err_input;
972 }
973 mutex_unlock(&file->lock);
974
975 return count;
976
977err_input:
978 mutex_unlock(&file->lock);
979 return ret;
980}
981
982static const struct file_operations qm_debug_fops = {
983 .owner = THIS_MODULE,
984 .open = simple_open,
985 .read = qm_debug_read,
986 .write = qm_debug_write,
987};
988
989struct qm_dfx_registers {
990 char *reg_name;
991 u64 reg_offset;
992};
993
994#define CNT_CYC_REGS_NUM 10
995static struct qm_dfx_registers qm_dfx_regs[] = {
996 /* XXX_CNT are read-clear registers */
997 {"QM_ECC_1BIT_CNT ", 0x104000ull},
998 {"QM_ECC_MBIT_CNT ", 0x104008ull},
999 {"QM_DFX_MB_CNT ", 0x104018ull},
1000 {"QM_DFX_DB_CNT ", 0x104028ull},
1001 {"QM_DFX_SQE_CNT ", 0x104038ull},
1002 {"QM_DFX_CQE_CNT ", 0x104048ull},
1003 {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
1004 {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
1005 {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
1006 {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
1007 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1008 {"QM_ECC_1BIT_INF ", 0x104004ull},
1009 {"QM_ECC_MBIT_INF ", 0x10400cull},
1010 {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
1011 {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
1012 {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
1013 {"QM_DFX_FF_ST0 ", 0x1040c8ull},
1014 {"QM_DFX_FF_ST1 ", 0x1040ccull},
1015 {"QM_DFX_FF_ST2 ", 0x1040d0ull},
1016 {"QM_DFX_FF_ST3 ", 0x1040d4ull},
1017 {"QM_DFX_FF_ST4 ", 0x1040d8ull},
1018 {"QM_DFX_FF_ST5 ", 0x1040dcull},
1019 {"QM_DFX_FF_ST6 ", 0x1040e0ull},
1020 {"QM_IN_IDLE_ST ", 0x1040e4ull},
1021 { NULL, 0}
1022};
1023
1024static struct qm_dfx_registers qm_vf_dfx_regs[] = {
1025 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1026 { NULL, 0}
1027};
1028
1029static int qm_regs_show(struct seq_file *s, void *unused)
1030{
1031 struct hisi_qm *qm = s->private;
1032 struct qm_dfx_registers *regs;
1033 u32 val;
1034
1035 if (qm->fun_type == QM_HW_PF)
1036 regs = qm_dfx_regs;
1037 else
1038 regs = qm_vf_dfx_regs;
1039
1040 while (regs->reg_name) {
1041 val = readl(qm->io_base + regs->reg_offset);
1042 seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
1043 regs++;
1044 }
1045
1046 return 0;
1047}
1048
1049static int qm_regs_open(struct inode *inode, struct file *file)
1050{
1051 return single_open(file, qm_regs_show, inode->i_private);
1052}
1053
1054static const struct file_operations qm_regs_fops = {
1055 .owner = THIS_MODULE,
1056 .open = qm_regs_open,
1057 .read = seq_read,
1058 .release = single_release,
1059};
1060
1061static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
1062 size_t count, loff_t *pos)
1063{
1064 char buf[QM_DBG_READ_LEN];
1065 int len;
1066
1067 if (*pos)
1068 return 0;
1069
1070 if (count < QM_DBG_READ_LEN)
1071 return -ENOSPC;
1072
1073 len = snprintf(buf, QM_DBG_READ_LEN, "%s\n",
1074 "Please echo help to cmd to get help information");
1075
1076 if (copy_to_user(buffer, buf, len))
1077 return -EFAULT;
1078
1079 return (*pos = len);
1080}
1081
1082static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
1083 dma_addr_t *dma_addr)
1084{
1085 struct device *dev = &qm->pdev->dev;
1086 void *ctx_addr;
1087
1088 ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
1089 if (!ctx_addr)
1090 return ERR_PTR(-ENOMEM);
1091
1092 *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
1093 if (dma_mapping_error(dev, *dma_addr)) {
1094 dev_err(dev, "DMA mapping error!\n");
1095 kfree(ctx_addr);
1096 return ERR_PTR(-ENOMEM);
1097 }
1098
1099 return ctx_addr;
1100}
1101
1102static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
1103 const void *ctx_addr, dma_addr_t *dma_addr)
1104{
1105 struct device *dev = &qm->pdev->dev;
1106
1107 dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
1108 kfree(ctx_addr);
1109}
1110
1111static int dump_show(struct hisi_qm *qm, void *info,
1112 unsigned int info_size, char *info_name)
1113{
1114 struct device *dev = &qm->pdev->dev;
1115 u8 *info_buf, *info_curr = info;
1116 u32 i;
1117#define BYTE_PER_DW 4
1118
1119 info_buf = kzalloc(info_size, GFP_KERNEL);
1120 if (!info_buf)
1121 return -ENOMEM;
1122
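	/*
	 * Reverse the byte order within each 4-byte word so that the dump
	 * below prints every dword with its most significant byte first.
	 */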
1123 for (i = 0; i < info_size; i++, info_curr++) {
1124 if (i % BYTE_PER_DW == 0)
1125 info_buf[i + 3UL] = *info_curr;
1126 else if (i % BYTE_PER_DW == 1)
1127 info_buf[i + 1UL] = *info_curr;
1128 else if (i % BYTE_PER_DW == 2)
1129 info_buf[i - 1] = *info_curr;
1130 else if (i % BYTE_PER_DW == 3)
1131 info_buf[i - 3] = *info_curr;
1132 }
1133
1134 dev_info(dev, "%s DUMP\n", info_name);
1135 for (i = 0; i < info_size; i += BYTE_PER_DW) {
1136 pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
1137 info_buf[i], info_buf[i + 1UL],
1138 info_buf[i + 2UL], info_buf[i + 3UL]);
1139 }
1140
1141 kfree(info_buf);
1142
1143 return 0;
1144}
1145
1146static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1147{
1148 return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
1149}
1150
1151static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1152{
1153 return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
1154}
1155
1156static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
1157{
1158 struct device *dev = &qm->pdev->dev;
1159 struct qm_sqc *sqc, *sqc_curr;
1160 dma_addr_t sqc_dma;
1161 u32 qp_id;
1162 int ret;
1163
1164 if (!s)
1165 return -EINVAL;
1166
1167 ret = kstrtou32(s, 0, &qp_id);
1168 if (ret || qp_id >= qm->qp_num) {
1169 dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
1170 return -EINVAL;
1171 }
1172
1173 sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
1174 if (IS_ERR(sqc))
1175 return PTR_ERR(sqc);
1176
1177 ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
1178 if (ret) {
1179 down_read(&qm->qps_lock);
1180 if (qm->sqc) {
1181 sqc_curr = qm->sqc + qp_id;
1182
1183 ret = dump_show(qm, sqc_curr, sizeof(*sqc),
1184 "SOFT SQC");
1185 if (ret)
1186 dev_info(dev, "Show soft sqc failed!\n");
1187 }
1188 up_read(&qm->qps_lock);
1189
1190 goto err_free_ctx;
1191 }
1192
1193 ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
1194 if (ret)
1195 dev_info(dev, "Show hw sqc failed!\n");
1196
1197err_free_ctx:
1198 qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
1199 return ret;
1200}
1201
1202static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
1203{
1204 struct device *dev = &qm->pdev->dev;
1205 struct qm_cqc *cqc, *cqc_curr;
1206 dma_addr_t cqc_dma;
1207 u32 qp_id;
1208 int ret;
1209
1210 if (!s)
1211 return -EINVAL;
1212
1213 ret = kstrtou32(s, 0, &qp_id);
1214 if (ret || qp_id >= qm->qp_num) {
1215 dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
1216 return -EINVAL;
1217 }
1218
1219 cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
1220 if (IS_ERR(cqc))
1221 return PTR_ERR(cqc);
1222
1223 ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
1224 if (ret) {
1225 down_read(&qm->qps_lock);
1226 if (qm->cqc) {
1227 cqc_curr = qm->cqc + qp_id;
1228
1229 ret = dump_show(qm, cqc_curr, sizeof(*cqc),
1230 "SOFT CQC");
1231 if (ret)
1232 dev_info(dev, "Show soft cqc failed!\n");
1233 }
1234 up_read(&qm->qps_lock);
1235
1236 goto err_free_ctx;
1237 }
1238
1239 ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
1240 if (ret)
1241 dev_info(dev, "Show hw cqc failed!\n");
1242
1243err_free_ctx:
1244 qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
1245 return ret;
1246}
1247
1248static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
1249 int cmd, char *name)
1250{
1251 struct device *dev = &qm->pdev->dev;
1252 dma_addr_t xeqc_dma;
1253 void *xeqc;
1254 int ret;
1255
1256 if (strsep(&s, " ")) {
1257 dev_err(dev, "Please do not input extra characters!\n");
1258 return -EINVAL;
1259 }
1260
1261 xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
1262 if (IS_ERR(xeqc))
1263 return PTR_ERR(xeqc);
1264
1265 ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
1266 if (ret)
1267 goto err_free_ctx;
1268
1269 ret = dump_show(qm, xeqc, size, name);
1270 if (ret)
1271 dev_info(dev, "Show hw %s failed!\n", name);
1272
1273err_free_ctx:
1274 qm_ctx_free(qm, size, xeqc, &xeqc_dma);
1275 return ret;
1276}
1277
1278static int q_dump_param_parse(struct hisi_qm *qm, char *s,
1279 u32 *e_id, u32 *q_id)
1280{
1281 struct device *dev = &qm->pdev->dev;
1282 unsigned int qp_num = qm->qp_num;
1283 char *presult;
1284 int ret;
1285
1286 presult = strsep(&s, " ");
1287 if (!presult) {
1288 dev_err(dev, "Please input qp number!\n");
1289 return -EINVAL;
1290 }
1291
1292 ret = kstrtou32(presult, 0, q_id);
1293 if (ret || *q_id >= qp_num) {
1294 dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
1295 return -EINVAL;
1296 }
1297
1298 presult = strsep(&s, " ");
1299 if (!presult) {
1300 dev_err(dev, "Please input sqe number!\n");
1301 return -EINVAL;
1302 }
1303
1304 ret = kstrtou32(presult, 0, e_id);
1305 if (ret || *e_id >= QM_Q_DEPTH) {
1306 dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
1307 return -EINVAL;
1308 }
1309
1310 if (strsep(&s, " ")) {
1311 dev_err(dev, "Please do not input extra characters!\n");
1312 return -EINVAL;
1313 }
1314
1315 return 0;
1316}
1317
1318static int qm_sq_dump(struct hisi_qm *qm, char *s)
1319{
1320 struct device *dev = &qm->pdev->dev;
1321 void *sqe, *sqe_curr;
1322 struct hisi_qp *qp;
1323 u32 qp_id, sqe_id;
1324 int ret;
1325
1326 ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
1327 if (ret)
1328 return ret;
1329
1330 sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
1331 if (!sqe)
1332 return -ENOMEM;
1333
1334 qp = &qm->qp_array[qp_id];
1335 memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
1336 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
1337 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
1338 qm->debug.sqe_mask_len);
1339
1340 ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
1341 if (ret)
1342 dev_info(dev, "Show sqe failed!\n");
1343
1344 kfree(sqe);
1345
1346 return ret;
1347}
1348
1349static int qm_cq_dump(struct hisi_qm *qm, char *s)
1350{
1351 struct device *dev = &qm->pdev->dev;
1352 struct qm_cqe *cqe_curr;
1353 struct hisi_qp *qp;
1354 u32 qp_id, cqe_id;
1355 int ret;
1356
1357 ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
1358 if (ret)
1359 return ret;
1360
1361 qp = &qm->qp_array[qp_id];
1362 cqe_curr = qp->cqe + cqe_id;
1363 ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
1364 if (ret)
1365 dev_info(dev, "Show cqe failed!\n");
1366
1367 return ret;
1368}
1369
1370static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
1371 size_t size, char *name)
1372{
1373 struct device *dev = &qm->pdev->dev;
1374 void *xeqe;
1375 u32 xeqe_id;
1376 int ret;
1377
1378 if (!s)
1379 return -EINVAL;
1380
1381 ret = kstrtou32(s, 0, &xeqe_id);
1382 if (ret || xeqe_id >= QM_Q_DEPTH) {
1383 dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
1384 return -EINVAL;
1385 }
1386
1387 down_read(&qm->qps_lock);
1388
1389 if (qm->eqe && !strcmp(name, "EQE")) {
1390 xeqe = qm->eqe + xeqe_id;
1391 } else if (qm->aeqe && !strcmp(name, "AEQE")) {
1392 xeqe = qm->aeqe + xeqe_id;
1393 } else {
1394 ret = -EINVAL;
1395 goto err_unlock;
1396 }
1397
1398 ret = dump_show(qm, xeqe, size, name);
1399 if (ret)
1400 dev_info(dev, "Show %s failed!\n", name);
1401
1402err_unlock:
1403 up_read(&qm->qps_lock);
1404 return ret;
1405}
1406
1407static int qm_dbg_help(struct hisi_qm *qm, char *s)
1408{
1409 struct device *dev = &qm->pdev->dev;
1410
1411 if (strsep(&s, " ")) {
1412 dev_err(dev, "Please do not input extra characters!\n");
1413 return -EINVAL;
1414 }
1415
1416 dev_info(dev, "available commands:\n");
1417 dev_info(dev, "sqc <num>\n");
1418 dev_info(dev, "cqc <num>\n");
1419 dev_info(dev, "eqc\n");
1420 dev_info(dev, "aeqc\n");
1421 dev_info(dev, "sq <num> <e>\n");
1422 dev_info(dev, "cq <num> <e>\n");
1423 dev_info(dev, "eq <e>\n");
1424 dev_info(dev, "aeq <e>\n");
1425
1426 return 0;
1427}
1428
1429static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
1430{
1431 struct device *dev = &qm->pdev->dev;
1432 char *presult, *s;
1433 int ret;
1434
1435 s = kstrdup(cmd_buf, GFP_KERNEL);
1436 if (!s)
1437 return -ENOMEM;
1438
1439 presult = strsep(&s, " ");
1440 if (!presult) {
1441 kfree(s);
1442 return -EINVAL;
1443 }
1444
1445 if (!strcmp(presult, "sqc"))
1446 ret = qm_sqc_dump(qm, s);
1447 else if (!strcmp(presult, "cqc"))
1448 ret = qm_cqc_dump(qm, s);
1449 else if (!strcmp(presult, "eqc"))
1450 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
1451 QM_MB_CMD_EQC, "EQC");
1452 else if (!strcmp(presult, "aeqc"))
1453 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
1454 QM_MB_CMD_AEQC, "AEQC");
1455 else if (!strcmp(presult, "sq"))
1456 ret = qm_sq_dump(qm, s);
1457 else if (!strcmp(presult, "cq"))
1458 ret = qm_cq_dump(qm, s);
1459 else if (!strcmp(presult, "eq"))
1460 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
1461 else if (!strcmp(presult, "aeq"))
1462 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
1463 else if (!strcmp(presult, "help"))
1464 ret = qm_dbg_help(qm, s);
1465 else
1466 ret = -EINVAL;
1467
1468 if (ret)
1469 dev_info(dev, "Please echo help\n");
1470
1471 kfree(s);
1472
1473 return ret;
1474}
1475
1476static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
1477 size_t count, loff_t *pos)
1478{
1479 struct hisi_qm *qm = filp->private_data;
1480 char *cmd_buf, *cmd_buf_tmp;
1481 int ret;
1482
1483 if (*pos)
1484 return 0;
1485
1486 /* Do nothing if the instance is being reset. */
1487 if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
1488 return 0;
1489
1490 if (count > QM_DBG_WRITE_LEN)
1491 return -ENOSPC;
1492
1493 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1494 if (!cmd_buf)
1495 return -ENOMEM;
1496
1497 if (copy_from_user(cmd_buf, buffer, count)) {
1498 kfree(cmd_buf);
1499 return -EFAULT;
1500 }
1501
1502 cmd_buf[count] = '\0';
1503
1504 cmd_buf_tmp = strchr(cmd_buf, '\n');
1505 if (cmd_buf_tmp) {
1506 *cmd_buf_tmp = '\0';
1507 count = cmd_buf_tmp - cmd_buf + 1;
1508 }
1509
1510 ret = qm_cmd_write_dump(qm, cmd_buf);
1511 if (ret) {
1512 kfree(cmd_buf);
1513 return ret;
1514 }
1515
1516 kfree(cmd_buf);
1517
1518 return count;
1519}
1520
1521static const struct file_operations qm_cmd_fops = {
1522 .owner = THIS_MODULE,
1523 .open = simple_open,
1524 .read = qm_cmd_read,
1525 .write = qm_cmd_write,
1526};
1527
1528static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
1529{
1530 struct dentry *qm_d = qm->debug.qm_d;
1531 struct debugfs_file *file = qm->debug.files + index;
1532
1533 debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
1534 &qm_debug_fops);
1535
1536 file->index = index;
1537 mutex_init(&file->lock);
1538 file->debug = &qm->debug;
1539
1540 return 0;
1541}
1542
1543static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1544{
1545 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1546}
1547
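/*
 * ce/nfe/fe select which hardware errors are reported as correctable,
 * non-fatal and fatal RAS errors; every error enabled here is also unmasked
 * as an abnormal interrupt source.
 */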
1548static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1549{
1550 u32 irq_enable = ce | nfe | fe;
1551 u32 irq_unmask = ~irq_enable;
1552
1553 qm->error_mask = ce | nfe | fe;
1554
1555 /* clear QM hw residual error source */
1556 writel(QM_ABNORMAL_INT_SOURCE_CLR,
1557 qm->io_base + QM_ABNORMAL_INT_SOURCE);
1558
1559 /* configure error type */
1560 writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
1561 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1562 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1563 writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
1564
1565 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1566 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1567}
1568
1569static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1570{
1571 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1572}
1573
1574static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1575{
1576 const struct hisi_qm_hw_error *err;
1577 struct device *dev = &qm->pdev->dev;
1578 u32 reg_val, type, vf_num;
1579 int i;
1580
1581 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
1582 err = &qm_hw_error[i];
1583 if (!(err->int_msk & error_status))
1584 continue;
1585
1586 dev_err(dev, "%s [error status=0x%x] found\n",
1587 err->msg, err->int_msk);
1588
1589 if (err->int_msk & QM_DB_TIMEOUT) {
1590 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
1591 type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
1592 QM_DB_TIMEOUT_TYPE_SHIFT;
1593 vf_num = reg_val & QM_DB_TIMEOUT_VF;
1594 dev_err(dev, "qm %s doorbell timeout in function %u\n",
1595 qm_db_timeout[type], vf_num);
1596 } else if (err->int_msk & QM_OF_FIFO_OF) {
1597 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
1598 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
1599 QM_FIFO_OVERFLOW_TYPE_SHIFT;
1600 vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
1601
1602 if (type < ARRAY_SIZE(qm_fifo_overflow))
1603 dev_err(dev, "qm %s fifo overflow in function %u\n",
1604 qm_fifo_overflow[type], vf_num);
1605 else
1606 dev_err(dev, "unknown error type\n");
1607 }
1608 }
1609}
1610
1611static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
1612{
1613 u32 error_status, tmp;
1614
1615 /* read err sts */
1616 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
1617 error_status = qm->error_mask & tmp;
1618
1619 if (error_status) {
1620 if (error_status & QM_ECC_MBIT)
1621 qm->err_status.is_qm_ecc_mbit = true;
1622
1623 qm_log_hw_error(qm, error_status);
1624 if (error_status == QM_DB_RANDOM_INVALID) {
1625 writel(error_status, qm->io_base +
1626 QM_ABNORMAL_INT_SOURCE);
1627 return ACC_ERR_RECOVERED;
1628 }
1629
1630 return ACC_ERR_NEED_RESET;
1631 }
1632
1633 return ACC_ERR_RECOVERED;
1634}
1635
1636static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
1637 .qm_db = qm_db_v1,
1638 .get_irq_num = qm_get_irq_num_v1,
1639 .hw_error_init = qm_hw_error_init_v1,
1640};
1641
1642static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
1643 .get_vft = qm_get_vft_v2,
1644 .qm_db = qm_db_v2,
1645 .get_irq_num = qm_get_irq_num_v2,
1646 .hw_error_init = qm_hw_error_init_v2,
1647 .hw_error_uninit = qm_hw_error_uninit_v2,
1648 .hw_error_handle = qm_hw_error_handle_v2,
1649};
1650
1651static void *qm_get_avail_sqe(struct hisi_qp *qp)
1652{
1653 struct hisi_qp_status *qp_status = &qp->qp_status;
1654 u16 sq_tail = qp_status->sq_tail;
1655
1656 if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH))
1657 return NULL;
1658
1659 return qp->sqe + sq_tail * qp->qm->sqe_size;
1660}
1661
1662static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
1663{
1664 struct device *dev = &qm->pdev->dev;
1665 struct hisi_qp *qp;
1666 int qp_id;
1667
1668 if (!qm_qp_avail_state(qm, NULL, QP_INIT))
1669 return ERR_PTR(-EPERM);
1670
1671 if (qm->qp_in_used == qm->qp_num) {
1672 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1673 qm->qp_num);
1674 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1675 return ERR_PTR(-EBUSY);
1676 }
1677
1678 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
1679 if (qp_id < 0) {
1680 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1681 qm->qp_num);
1682 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1683 return ERR_PTR(-EBUSY);
1684 }
1685
1686 qp = &qm->qp_array[qp_id];
1687
1688 memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
1689
1690 qp->event_cb = NULL;
1691 qp->req_cb = NULL;
1692 qp->qp_id = qp_id;
1693 qp->alg_type = alg_type;
1694 qm->qp_in_used++;
1695 atomic_set(&qp->qp_status.flags, QP_INIT);
1696
1697 return qp;
1698}
1699
1700/**
1701 * hisi_qm_create_qp() - Create a queue pair from qm.
1702 * @qm: The qm we create a qp from.
1703 * @alg_type: Accelerator specific algorithm type in sqc.
1704 *
1705 * Return the created qp, -EBUSY if all qps in the qm are allocated, or
1706 * -ENOMEM if allocating qp memory fails.
1707 */
1708struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
1709{
1710 struct hisi_qp *qp;
1711
1712 down_write(&qm->qps_lock);
1713 qp = qm_create_qp_nolock(qm, alg_type);
1714 up_write(&qm->qps_lock);
1715
1716 return qp;
1717}
1718EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
1719
1720/**
1721 * hisi_qm_release_qp() - Release a qp back to its qm.
1722 * @qp: The qp we want to release.
1723 *
1724 * This function releases the resource of a qp.
1725 */
1726void hisi_qm_release_qp(struct hisi_qp *qp)
1727{
1728 struct hisi_qm *qm = qp->qm;
1729
1730 down_write(&qm->qps_lock);
1731
1732 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
1733 up_write(&qm->qps_lock);
1734 return;
1735 }
1736
1737 qm->qp_in_used--;
1738 idr_remove(&qm->qp_idr, qp->qp_id);
1739
1740 up_write(&qm->qps_lock);
1741}
1742EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
1743
1744static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
1745{
1746 struct hisi_qm *qm = qp->qm;
1747 struct device *dev = &qm->pdev->dev;
1748 enum qm_hw_ver ver = qm->ver;
1749 struct qm_sqc *sqc;
1750 struct qm_cqc *cqc;
1751 dma_addr_t sqc_dma;
1752 dma_addr_t cqc_dma;
1753 int ret;
1754
1755 qm_init_qp_status(qp);
1756
1757 sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
1758 if (!sqc)
1759 return -ENOMEM;
1760 sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
1761 DMA_TO_DEVICE);
1762 if (dma_mapping_error(dev, sqc_dma)) {
1763 kfree(sqc);
1764 return -ENOMEM;
1765 }
1766
1767 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
1768 if (ver == QM_HW_V1) {
1769 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
1770 sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
1771 } else {
1772 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
1773 sqc->w8 = 0; /* rand_qc */
1774 }
1775 sqc->cq_num = cpu_to_le16(qp_id);
1776 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
1777
1778 ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
1779 dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
1780 kfree(sqc);
1781 if (ret)
1782 return ret;
1783
1784 cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
1785 if (!cqc)
1786 return -ENOMEM;
1787 cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
1788 DMA_TO_DEVICE);
1789 if (dma_mapping_error(dev, cqc_dma)) {
1790 kfree(cqc);
1791 return -ENOMEM;
1792 }
1793
1794 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
1795 if (ver == QM_HW_V1) {
1796 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
1797 cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
1798 } else {
1799 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
1800 cqc->w8 = 0;
1801 }
1802 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
1803
1804 ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
1805 dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
1806 kfree(cqc);
1807
1808 return ret;
1809}
1810
1811static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
1812{
1813 struct hisi_qm *qm = qp->qm;
1814 struct device *dev = &qm->pdev->dev;
1815 int qp_id = qp->qp_id;
1816 int pasid = arg;
1817 int ret;
1818
1819 if (!qm_qp_avail_state(qm, qp, QP_START))
1820 return -EPERM;
1821
1822 ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
1823 if (ret)
1824 return ret;
1825
1826 atomic_set(&qp->qp_status.flags, QP_START);
1827 dev_dbg(dev, "queue %d started\n", qp_id);
1828
1829 return 0;
1830}
1831
1832/**
1833 * hisi_qm_start_qp() - Start a qp into running.
1834 * @qp: The qp we want to start to run.
1835 * @arg: Accelerator specific argument.
1836 *
1837 * After this function, the qp can receive requests from the user. Return
1838 * 0 on success, -EBUSY on failure.
1839 */
1840int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
1841{
1842 struct hisi_qm *qm = qp->qm;
1843 int ret;
1844
1845 down_write(&qm->qps_lock);
1846 ret = qm_start_qp_nolock(qp, arg);
1847 up_write(&qm->qps_lock);
1848
1849 return ret;
1850}
1851EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
1852
1853/**
1854 * Determine whether the queue has been drained by comparing the tail
1855 * pointers of the sq and cq.
1856 */
1857static int qm_drain_qp(struct hisi_qp *qp)
1858{
1859 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
1860 struct hisi_qm *qm = qp->qm;
1861 struct device *dev = &qm->pdev->dev;
1862 struct qm_sqc *sqc;
1863 struct qm_cqc *cqc;
1864 dma_addr_t dma_addr;
1865 int ret = 0, i = 0;
1866 void *addr;
1867
1868 /*
1869 * No need to drain the queue if an ECC multi-bit error has occurred,
1870 * because the master OOO will be blocked.
1871 */
1872 if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
1873 return 0;
1874
1875 addr = qm_ctx_alloc(qm, size, &dma_addr);
1876 if (IS_ERR(addr)) {
1877 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
1878 return -ENOMEM;
1879 }
1880
1881 while (++i) {
1882 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
1883 if (ret) {
1884 dev_err_ratelimited(dev, "Failed to dump sqc!\n");
1885 break;
1886 }
1887 sqc = addr;
1888
1889 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
1890 qp->qp_id);
1891 if (ret) {
1892 dev_err_ratelimited(dev, "Failed to dump cqc!\n");
1893 break;
1894 }
1895 cqc = addr + sizeof(struct qm_sqc);
1896
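		/*
		 * The queue is drained once the sq and cq tails (including the
		 * tail index bit) match, i.e. every submitted sqe has produced
		 * its cqe.
		 */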
1897 if ((sqc->tail == cqc->tail) &&
1898 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
1899 break;
1900
1901 if (i == MAX_WAIT_COUNTS) {
1902 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
1903 ret = -EBUSY;
1904 break;
1905 }
1906
1907 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
1908 }
1909
1910 qm_ctx_free(qm, size, addr, &dma_addr);
1911
1912 return ret;
1913}
1914
1915static int qm_stop_qp_nolock(struct hisi_qp *qp)
1916{
1917 struct device *dev = &qp->qm->pdev->dev;
1918 int ret;
1919
1920 /*
1921 * It is allowed to stop and release a qp during reset. If a qp that was
1922 * stopped by reset should still be released afterwards, the is_resetting
1923 * flag is cleared so that this qp will not be restarted once the reset
1924 * completes.
1925 */
1926 if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
1927 qp->is_resetting = false;
1928 return 0;
1929 }
1930
1931 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
1932 return -EPERM;
1933
1934 atomic_set(&qp->qp_status.flags, QP_STOP);
1935
1936 ret = qm_drain_qp(qp);
1937 if (ret)
1938 dev_err(dev, "Failed to drain out data for stopping!\n");
1939
1940 if (qp->qm->wq)
1941 flush_workqueue(qp->qm->wq);
1942 else
1943 flush_work(&qp->qm->work);
1944
1945 dev_dbg(dev, "stop queue %u!", qp->qp_id);
1946
1947 return 0;
1948}
1949
1950/**
1951 * hisi_qm_stop_qp() - Stop a qp in qm.
1952 * @qp: The qp we want to stop.
1953 *
1954 * This function is the reverse of hisi_qm_start_qp(). Return 0 if successful.
1955 */
1956int hisi_qm_stop_qp(struct hisi_qp *qp)
1957{
1958 int ret;
1959
1960 down_write(&qp->qm->qps_lock);
1961 ret = qm_stop_qp_nolock(qp);
1962 up_write(&qp->qm->qps_lock);
1963
1964 return ret;
1965}
1966EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
1967
1968/**
1969 * hisi_qp_send() - Queue up a task in the hardware queue.
1970 * @qp: The qp in which to put the message.
1971 * @msg: The message.
1972 *
1973 * This function will return -EBUSY if qp is currently full, and -EAGAIN
1974 * if qp related qm is resetting.
1975 *
1976 * Note: This function may run concurrently with qm_irq_thread and ACC reset.
1977 * It has no race with qm_irq_thread. However, an ACC reset may happen during
1978 * hisi_qp_send; for performance no lock is taken here. This can make the
1979 * current qm_db write fail, or the sent sqe may never be received. The QM
1980 * sync/async receive functions should handle such error sqes, and the ACC
1981 * reset-done handler should clear the used sqes to 0.
1982 */
1983int hisi_qp_send(struct hisi_qp *qp, const void *msg)
1984{
1985 struct hisi_qp_status *qp_status = &qp->qp_status;
1986 u16 sq_tail = qp_status->sq_tail;
1987 u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
1988 void *sqe = qm_get_avail_sqe(qp);
1989
1990 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
1991 atomic_read(&qp->qm->status.flags) == QM_STOP ||
1992 qp->is_resetting)) {
1993 dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
1994 return -EAGAIN;
1995 }
1996
1997 if (!sqe)
1998 return -EBUSY;
1999
2000 memcpy(sqe, msg, qp->qm->sqe_size);
2001
2002 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
2003 atomic_inc(&qp->qp_status.used);
2004 qp_status->sq_tail = sq_tail_next;
2005
2006 return 0;
2007}
2008EXPORT_SYMBOL_GPL(hisi_qp_send);
2009
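/*
 * Illustrative usage sketch (not part of this driver): an accelerator driver
 * that already owns a started "qm" would typically do something like the
 * following, where "my_req_done" is a hypothetical completion callback and
 * "req" points to qm->sqe_size bytes of request data:
 *
 *	struct hisi_qp *qp = hisi_qm_create_qp(qm, 0);
 *
 *	if (!IS_ERR(qp)) {
 *		qp->req_cb = my_req_done;
 *		if (!hisi_qm_start_qp(qp, 0))
 *			hisi_qp_send(qp, req);
 *		hisi_qm_stop_qp(qp);
 *		hisi_qm_release_qp(qp);
 *	}
 */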
2010static void hisi_qm_cache_wb(struct hisi_qm *qm)
2011{
2012 unsigned int val;
2013
2014 if (qm->ver == QM_HW_V1)
2015 return;
2016
2017 writel(0x1, qm->io_base + QM_CACHE_WB_START);
2018 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2019 val, val & BIT(0), 10, 1000))
2020 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
263c9959
ZW
2021}
2022
9e00df71
ZG
2023static void qm_qp_event_notifier(struct hisi_qp *qp)
2024{
2025 wake_up_interruptible(&qp->uacce_q->wait);
2026}
2027
2028static int hisi_qm_get_available_instances(struct uacce_device *uacce)
2029{
5308f660 2030 return hisi_qm_get_free_qp_num(uacce->priv);
9e00df71
ZG
2031}
2032
2033static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
2034 unsigned long arg,
2035 struct uacce_queue *q)
2036{
2037 struct hisi_qm *qm = uacce->priv;
2038 struct hisi_qp *qp;
2039 u8 alg_type = 0;
2040
2041 qp = hisi_qm_create_qp(qm, alg_type);
2042 if (IS_ERR(qp))
2043 return PTR_ERR(qp);
2044
2045 q->priv = qp;
2046 q->uacce = uacce;
2047 qp->uacce_q = q;
2048 qp->event_cb = qm_qp_event_notifier;
2049 qp->pasid = arg;
2050
2051 return 0;
2052}
2053
2054static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
2055{
2056 struct hisi_qp *qp = q->priv;
2057
2058 hisi_qm_cache_wb(qp->qm);
2059 hisi_qm_release_qp(qp);
2060}
2061
2062/* map sq/cq/doorbell to user space */
2063static int hisi_qm_uacce_mmap(struct uacce_queue *q,
2064 struct vm_area_struct *vma,
2065 struct uacce_qfile_region *qfr)
2066{
2067 struct hisi_qp *qp = q->priv;
2068 struct hisi_qm *qm = qp->qm;
2069 size_t sz = vma->vm_end - vma->vm_start;
2070 struct pci_dev *pdev = qm->pdev;
2071 struct device *dev = &pdev->dev;
2072 unsigned long vm_pgoff;
2073 int ret;
2074
2075 switch (qfr->type) {
2076 case UACCE_QFRT_MMIO:
58ca0060
WQ
2077 if (qm->ver == QM_HW_V1) {
2078 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
9e00df71
ZG
2079 return -EINVAL;
2080 } else {
58ca0060
WQ
2081 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
2082 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
9e00df71
ZG
2083 return -EINVAL;
2084 }
2085
2086 vma->vm_flags |= VM_IO;
2087
2088 return remap_pfn_range(vma, vma->vm_start,
2089 qm->phys_base >> PAGE_SHIFT,
2090 sz, pgprot_noncached(vma->vm_page_prot));
2091 case UACCE_QFRT_DUS:
2092 if (sz != qp->qdma.size)
2093 return -EINVAL;
2094
2095 /*
2096 * dma_mmap_coherent() requires vm_pgoff to be 0;
2097 * restore vm_pgoff to its initial value after mmap()
2098 */
2099 vm_pgoff = vma->vm_pgoff;
2100 vma->vm_pgoff = 0;
2101 ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
2102 qp->qdma.dma, sz);
2103 vma->vm_pgoff = vm_pgoff;
2104 return ret;
2105
2106 default:
2107 return -EINVAL;
2108 }
2109}
2110
2111static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
2112{
2113 struct hisi_qp *qp = q->priv;
2114
2115 return hisi_qm_start_qp(qp, qp->pasid);
2116}
2117
2118static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
2119{
2120 hisi_qm_stop_qp(q->priv);
2121}
2122
2123static int qm_set_sqctype(struct uacce_queue *q, u16 type)
2124{
2125 struct hisi_qm *qm = q->uacce->priv;
2126 struct hisi_qp *qp = q->priv;
2127
b67202e8 2128 down_write(&qm->qps_lock);
9e00df71 2129 qp->alg_type = type;
b67202e8 2130 up_write(&qm->qps_lock);
9e00df71
ZG
2131
2132 return 0;
2133}
2134
2135static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
2136 unsigned long arg)
2137{
2138 struct hisi_qp *qp = q->priv;
2139 struct hisi_qp_ctx qp_ctx;
2140
2141 if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
2142 if (copy_from_user(&qp_ctx, (void __user *)arg,
2143 sizeof(struct hisi_qp_ctx)))
2144 return -EFAULT;
2145
2146 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
2147 return -EINVAL;
2148
2149 qm_set_sqctype(q, qp_ctx.qc_type);
2150 qp_ctx.id = qp->qp_id;
2151
2152 if (copy_to_user((void __user *)arg, &qp_ctx,
2153 sizeof(struct hisi_qp_ctx)))
2154 return -EFAULT;
2155 } else {
2156 return -EINVAL;
2157 }
2158
2159 return 0;
2160}
2161
2162static const struct uacce_ops uacce_qm_ops = {
2163 .get_available_instances = hisi_qm_get_available_instances,
2164 .get_queue = hisi_qm_uacce_get_queue,
2165 .put_queue = hisi_qm_uacce_put_queue,
2166 .start_queue = hisi_qm_uacce_start_queue,
2167 .stop_queue = hisi_qm_uacce_stop_queue,
2168 .mmap = hisi_qm_uacce_mmap,
2169 .ioctl = hisi_qm_uacce_ioctl,
2170};
2171
2172static int qm_alloc_uacce(struct hisi_qm *qm)
2173{
2174 struct pci_dev *pdev = qm->pdev;
2175 struct uacce_device *uacce;
2176 unsigned long mmio_page_nr;
2177 unsigned long dus_page_nr;
2178 struct uacce_interface interface = {
2179 .flags = UACCE_DEV_SVA,
2180 .ops = &uacce_qm_ops,
2181 };
2182
2183 strncpy(interface.name, pdev->driver->name, sizeof(interface.name));
2184
2185 uacce = uacce_alloc(&pdev->dev, &interface);
2186 if (IS_ERR(uacce))
2187 return PTR_ERR(uacce);
2188
2189 if (uacce->flags & UACCE_DEV_SVA) {
2190 qm->use_sva = true;
2191 } else {
2192 /* only consider sva case */
2193 uacce_remove(uacce);
2194 qm->uacce = NULL;
2195 return -EINVAL;
2196 }
2197
2198 uacce->is_vf = pdev->is_virtfn;
2199 uacce->priv = qm;
2200 uacce->algs = qm->algs;
2201
2202 if (qm->ver == QM_HW_V1) {
2203 mmio_page_nr = QM_DOORBELL_PAGE_NR;
2204 uacce->api_ver = HISI_QM_API_VER_BASE;
2205 } else {
2206 mmio_page_nr = QM_DOORBELL_PAGE_NR +
2207 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
2208 uacce->api_ver = HISI_QM_API_VER2_BASE;
2209 }
2210
2211 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
2212 sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
2213
2214 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
2215 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
2216
2217 qm->uacce = uacce;
2218
2219 return 0;
2220}
2221
700f7d0d
ZW
2222/**
2223 * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
2224 * @qm: The qm to query for free qps.
2225 *
2226 * This function returns the number of free qps in the qm.
2227 */
2228int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
2229{
2230 int ret;
2231
b67202e8 2232 down_read(&qm->qps_lock);
700f7d0d 2233 ret = qm->qp_num - qm->qp_in_used;
b67202e8 2234 up_read(&qm->qps_lock);
700f7d0d
ZW
2235
2236 return ret;
2237}
2238EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
2239
5308f660
WQ
2240static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
2241{
2242 struct device *dev = &qm->pdev->dev;
2243 struct qm_dma *qdma;
2244 int i;
2245
2246 for (i = num - 1; i >= 0; i--) {
2247 qdma = &qm->qp_array[i].qdma;
2248 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
2249 }
2250
2251 kfree(qm->qp_array);
2252}
2253
2254static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
2255{
2256 struct device *dev = &qm->pdev->dev;
2257 size_t off = qm->sqe_size * QM_Q_DEPTH;
2258 struct hisi_qp *qp;
2259
2260 qp = &qm->qp_array[id];
2261 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
2262 GFP_KERNEL);
2263 if (!qp->qdma.va)
2264 return -ENOMEM;
2265
2266 qp->sqe = qp->qdma.va;
2267 qp->sqe_dma = qp->qdma.dma;
2268 qp->cqe = qp->qdma.va + off;
2269 qp->cqe_dma = qp->qdma.dma + off;
2270 qp->qdma.size = dma_size;
2271 qp->qm = qm;
2272 qp->qp_id = id;
2273
2274 return 0;
2275}
2276
2277static int hisi_qm_memory_init(struct hisi_qm *qm)
2278{
2279 struct device *dev = &qm->pdev->dev;
2280 size_t qp_dma_size, off = 0;
2281 int i, ret = 0;
2282
2283#define QM_INIT_BUF(qm, type, num) do { \
2284 (qm)->type = ((qm)->qdma.va + (off)); \
2285 (qm)->type##_dma = (qm)->qdma.dma + (off); \
2286 off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
2287} while (0)
2288
2289 idr_init(&qm->qp_idr);
2290 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
2291 QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
2292 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
2293 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
2294 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
2295 GFP_ATOMIC);
2296 dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
2297 if (!qm->qdma.va)
2298 return -ENOMEM;
2299
2300 QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
2301 QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
2302 QM_INIT_BUF(qm, sqc, qm->qp_num);
2303 QM_INIT_BUF(qm, cqc, qm->qp_num);
2304
2305 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
2306 if (!qm->qp_array) {
2307 ret = -ENOMEM;
2308 goto err_alloc_qp_array;
2309 }
2310
2311 /* one more page for device or qp statuses */
2312 qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
2313 sizeof(struct qm_cqe) * QM_Q_DEPTH;
2314 qp_dma_size = PAGE_ALIGN(qp_dma_size);
2315 for (i = 0; i < qm->qp_num; i++) {
2316 ret = hisi_qp_memory_init(qm, qp_dma_size, i);
2317 if (ret)
2318 goto err_init_qp_mem;
2319
2320 dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
2321 }
2322
2323 return ret;
2324
2325err_init_qp_mem:
2326 hisi_qp_memory_uninit(qm, i);
2327err_alloc_qp_array:
2328 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
2329
2330 return ret;
2331}
2332
d9701f8d
WQ
2333static void hisi_qm_pre_init(struct hisi_qm *qm)
2334{
2335 struct pci_dev *pdev = qm->pdev;
2336
58ca0060 2337 if (qm->ver == QM_HW_V1)
d9701f8d 2338 qm->ops = &qm_hw_ops_v1;
58ca0060 2339 else
d9701f8d 2340 qm->ops = &qm_hw_ops_v2;
d9701f8d
WQ
2341
2342 pci_set_drvdata(pdev, qm);
2343 mutex_init(&qm->mailbox_lock);
2344 init_rwsem(&qm->qps_lock);
2345 qm->qp_in_used = 0;
2346}
2347
263c9959
ZW
2348/**
2349 * hisi_qm_uninit() - Uninitialize qm.
2350 * @qm: The qm to be uninitialized.
2351 *
2352 * This function releases the qm-related device resources.
2353 */
2354void hisi_qm_uninit(struct hisi_qm *qm)
2355{
2356 struct pci_dev *pdev = qm->pdev;
2357 struct device *dev = &pdev->dev;
2358
b67202e8
ZW
2359 down_write(&qm->qps_lock);
2360
2361 if (!qm_avail_state(qm, QM_CLOSE)) {
2362 up_write(&qm->qps_lock);
2363 return;
2364 }
2365
9e00df71
ZG
2366 uacce_remove(qm->uacce);
2367 qm->uacce = NULL;
2368
5308f660
WQ
2369 hisi_qp_memory_uninit(qm, qm->qp_num);
2370 idr_destroy(&qm->qp_idr);
2371
b977e030 2372 if (qm->qdma.va) {
263c9959
ZW
2373 hisi_qm_cache_wb(qm);
2374 dma_free_coherent(dev, qm->qdma.size,
2375 qm->qdma.va, qm->qdma.dma);
2376 memset(&qm->qdma, 0, sizeof(qm->qdma));
2377 }
2378
2379 qm_irq_unregister(qm);
2380 pci_free_irq_vectors(pdev);
2381 iounmap(qm->io_base);
2382 pci_release_mem_regions(pdev);
2383 pci_disable_device(pdev);
b67202e8
ZW
2384
2385 up_write(&qm->qps_lock);
263c9959
ZW
2386}
2387EXPORT_SYMBOL_GPL(hisi_qm_uninit);
2388
79e09f30
ZW
2389/**
2390 * hisi_qm_get_vft() - Get vft from a qm.
2391 * @qm: The qm whose vft we want to get.
2392 * @base: The base number of queue in vft.
2393 * @number: The number of queues in vft.
2394 *
2395 * We can allocate multiple queues to a qm by configuring the virtual
2396 * function table. This function reads that configuration. Normally it is
2397 * called in the VF driver to get the queue information.
2398 *
2399 * qm hw v1 does not support this interface.
2400 */
2401int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
2402{
2403 if (!base || !number)
2404 return -EINVAL;
2405
2406 if (!qm->ops->get_vft) {
2407 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
2408 return -EINVAL;
2409 }
2410
2411 return qm->ops->get_vft(qm, base, number);
2412}
2413EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
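/*
 * Usage sketch (illustrative only): a VF driver can query the queue range
 * assigned to it before allocating qps (qm hw v1 will return an error here).
 */
static inline int example_vf_read_queue_range(struct hisi_qm *qm)
{
	u32 base, number;
	int ret;

	ret = hisi_qm_get_vft(qm, &base, &number);
	if (ret)
		return ret;

	dev_info(&qm->pdev->dev, "VF owns %u queues starting at %u\n",
		 number, base);
	return 0;
}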
2414
263c9959 2415/**
263c9959
ZW
2416 * This function is always called in the PF driver; it is used to assign
2417 * queues among the PF and VFs.
2418 *
2419 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2420 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
2421 * (VF function number 0x2)
2422 */
cd1b7ae3 2423static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
263c9959
ZW
2424 u32 number)
2425{
2426 u32 max_q_num = qm->ctrl_qp_num;
2427
2428 if (base >= max_q_num || number > max_q_num ||
2429 (base + number) > max_q_num)
2430 return -EINVAL;
2431
2432 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
2433}
263c9959
ZW
2434
2435static void qm_init_eq_aeq_status(struct hisi_qm *qm)
2436{
2437 struct hisi_qm_status *status = &qm->status;
2438
2439 status->eq_head = 0;
2440 status->aeq_head = 0;
9a8641a7
ST
2441 status->eqc_phase = true;
2442 status->aeqc_phase = true;
263c9959
ZW
2443}
2444
2445static int qm_eq_ctx_cfg(struct hisi_qm *qm)
2446{
2447 struct device *dev = &qm->pdev->dev;
2448 struct qm_eqc *eqc;
2449 struct qm_aeqc *aeqc;
2450 dma_addr_t eqc_dma;
2451 dma_addr_t aeqc_dma;
2452 int ret;
2453
2454 qm_init_eq_aeq_status(qm);
2455
2456 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
2457 if (!eqc)
2458 return -ENOMEM;
2459 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
2460 DMA_TO_DEVICE);
2461 if (dma_mapping_error(dev, eqc_dma)) {
2462 kfree(eqc);
2463 return -ENOMEM;
2464 }
2465
9a8641a7
ST
2466 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
2467 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
263c9959 2468 if (qm->ver == QM_HW_V1)
9a8641a7
ST
2469 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
2470 eqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
263c9959
ZW
2471 ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
2472 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
2473 kfree(eqc);
2474 if (ret)
2475 return ret;
2476
2477 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
2478 if (!aeqc)
2479 return -ENOMEM;
2480 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
2481 DMA_TO_DEVICE);
2482 if (dma_mapping_error(dev, aeqc_dma)) {
2483 kfree(aeqc);
2484 return -ENOMEM;
2485 }
2486
9a8641a7
ST
2487 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
2488 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
2489 aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
263c9959
ZW
2490
2491 ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
2492 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
2493 kfree(aeqc);
2494
2495 return ret;
2496}
2497
2498static int __hisi_qm_start(struct hisi_qm *qm)
2499{
263c9959
ZW
2500 int ret;
2501
263c9959
ZW
2502 WARN_ON(!qm->qdma.dma);
2503
79e09f30
ZW
2504 if (qm->fun_type == QM_HW_PF) {
2505 ret = qm_dev_mem_reset(qm);
2506 if (ret)
2507 return ret;
263c9959 2508
79e09f30
ZW
2509 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
2510 if (ret)
2511 return ret;
2512 }
263c9959 2513
263c9959
ZW
2514 ret = qm_eq_ctx_cfg(qm);
2515 if (ret)
2516 return ret;
2517
2518 ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
2519 if (ret)
2520 return ret;
2521
2522 ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
2523 if (ret)
2524 return ret;
2525
2526 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
2527 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
2528
2529 return 0;
2530}
2531
2532/**
2533 * hisi_qm_start() - start qm
2534 * @qm: The qm to be started.
2535 *
2536 * This function starts a qm; qps can then be allocated from it.
2537 */
2538int hisi_qm_start(struct hisi_qm *qm)
2539{
2540 struct device *dev = &qm->pdev->dev;
b67202e8
ZW
2541 int ret = 0;
2542
2543 down_write(&qm->qps_lock);
2544
2545 if (!qm_avail_state(qm, QM_START)) {
2546 up_write(&qm->qps_lock);
2547 return -EPERM;
2548 }
263c9959
ZW
2549
2550 dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
2551
2552 if (!qm->qp_num) {
2553 dev_err(dev, "qp_num should not be 0\n");
b67202e8
ZW
2554 ret = -EINVAL;
2555 goto err_unlock;
263c9959
ZW
2556 }
2557
b67202e8
ZW
2558 ret = __hisi_qm_start(qm);
2559 if (!ret)
2560 atomic_set(&qm->status.flags, QM_START);
2561
2562err_unlock:
2563 up_write(&qm->qps_lock);
2564 return ret;
263c9959
ZW
2565}
2566EXPORT_SYMBOL_GPL(hisi_qm_start);
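/*
 * Usage sketch (illustrative only; alg_type and the pasid value 0 are
 * placeholders): once the qm is started, qps can be created and started
 * from it.
 */
static inline struct hisi_qp *example_start_and_get_qp(struct hisi_qm *qm,
							u8 alg_type)
{
	struct hisi_qp *qp;
	int ret;

	ret = hisi_qm_start(qm);
	if (ret)
		return ERR_PTR(ret);

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return qp;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret) {
		hisi_qm_release_qp(qp);
		return ERR_PTR(ret);
	}

	return qp;
}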
2567
6c6dd580
ST
2568static int qm_restart(struct hisi_qm *qm)
2569{
2570 struct device *dev = &qm->pdev->dev;
2571 struct hisi_qp *qp;
2572 int ret, i;
2573
2574 ret = hisi_qm_start(qm);
2575 if (ret < 0)
2576 return ret;
2577
b67202e8 2578 down_write(&qm->qps_lock);
6c6dd580 2579 for (i = 0; i < qm->qp_num; i++) {
5308f660
WQ
2580 qp = &qm->qp_array[i];
2581 if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
b67202e8
ZW
2582 qp->is_resetting == true) {
2583 ret = qm_start_qp_nolock(qp, 0);
6c6dd580
ST
2584 if (ret < 0) {
2585 dev_err(dev, "Failed to start qp%d!\n", i);
2586
b67202e8
ZW
2587 up_write(&qm->qps_lock);
2588 return ret;
2589 }
2590 qp->is_resetting = false;
2591 }
2592 }
2593 up_write(&qm->qps_lock);
2594
2595 return 0;
2596}
2597
2598/* Stop started qps in reset flow */
2599static int qm_stop_started_qp(struct hisi_qm *qm)
2600{
2601 struct device *dev = &qm->pdev->dev;
2602 struct hisi_qp *qp;
2603 int i, ret;
2604
2605 for (i = 0; i < qm->qp_num; i++) {
5308f660 2606 qp = &qm->qp_array[i];
b67202e8
ZW
2607 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
2608 qp->is_resetting = true;
2609 ret = qm_stop_qp_nolock(qp);
2610 if (ret < 0) {
2611 dev_err(dev, "Failed to stop qp%d!\n", i);
6c6dd580
ST
2612 return ret;
2613 }
2614 }
2615 }
6c6dd580
ST
2616
2617 return 0;
2618}
2619
2620/**
2621 * This function clears the memory of all queues in a qm. The accelerator
2622 * reset flow can use this to clear the queues.
2623 */
2624static void qm_clear_queues(struct hisi_qm *qm)
2625{
2626 struct hisi_qp *qp;
2627 int i;
2628
2629 for (i = 0; i < qm->qp_num; i++) {
5308f660
WQ
2630 qp = &qm->qp_array[i];
2631 if (qp->is_resetting)
6c6dd580
ST
2632 memset(qp->qdma.va, 0, qp->qdma.size);
2633 }
2634
2635 memset(qm->qdma.va, 0, qm->qdma.size);
2636}
2637
263c9959
ZW
2638/**
2639 * hisi_qm_stop() - Stop a qm.
2640 * @qm: The qm which will be stopped.
2641 *
2642 * This function stops the qm and its qps; the qm can then no longer accept
2643 * requests. Related resources are not released in this state; hisi_qm_start
2644 * can be used to start the qm again.
2645 */
2646int hisi_qm_stop(struct hisi_qm *qm)
2647{
b67202e8
ZW
2648 struct device *dev = &qm->pdev->dev;
2649 int ret = 0;
263c9959 2650
b67202e8
ZW
2651 down_write(&qm->qps_lock);
2652
2653 if (!qm_avail_state(qm, QM_STOP)) {
2654 ret = -EPERM;
2655 goto err_unlock;
263c9959
ZW
2656 }
2657
b67202e8
ZW
2658 if (qm->status.stop_reason == QM_SOFT_RESET ||
2659 qm->status.stop_reason == QM_FLR) {
2660 ret = qm_stop_started_qp(qm);
2661 if (ret < 0) {
2662 dev_err(dev, "Failed to stop started qp!\n");
2663 goto err_unlock;
2664 }
2665 }
263c9959
ZW
2666
2667 /* Mask eq and aeq irq */
2668 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
2669 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
2670
79e09f30
ZW
2671 if (qm->fun_type == QM_HW_PF) {
2672 ret = hisi_qm_set_vft(qm, 0, 0, 0);
b67202e8 2673 if (ret < 0) {
79e09f30 2674 dev_err(dev, "Failed to set vft!\n");
b67202e8
ZW
2675 ret = -EBUSY;
2676 goto err_unlock;
2677 }
79e09f30 2678 }
263c9959 2679
6c6dd580 2680 qm_clear_queues(qm);
b67202e8 2681 atomic_set(&qm->status.flags, QM_STOP);
6c6dd580 2682
b67202e8
ZW
2683err_unlock:
2684 up_write(&qm->qps_lock);
263c9959
ZW
2685 return ret;
2686}
2687EXPORT_SYMBOL_GPL(hisi_qm_stop);
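/*
 * Usage sketch (illustrative only): the remove/teardown path mirrors probe;
 * hisi_qm_stop() quiesces the queues and hisi_qm_uninit() then releases the
 * resources.
 */
static inline void example_shut_down_qm(struct hisi_qm *qm)
{
	if (hisi_qm_stop(qm))
		dev_warn(&qm->pdev->dev, "stop failed, uninit anyway\n");

	hisi_qm_uninit(qm);
}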
2688
0a3a3960
LL
2689static ssize_t qm_status_read(struct file *filp, char __user *buffer,
2690 size_t count, loff_t *pos)
2691{
2692 struct hisi_qm *qm = filp->private_data;
2693 char buf[QM_DBG_READ_LEN];
2694 int val, cp_len, len;
2695
2696 if (*pos)
2697 return 0;
2698
2699 if (count < QM_DBG_READ_LEN)
2700 return -ENOSPC;
2701
2702 val = atomic_read(&qm->status.flags);
2703 len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
2704 if (!len)
2705 return -EFAULT;
2706
2707 cp_len = copy_to_user(buffer, buf, len);
2708 if (cp_len)
2709 return -EFAULT;
2710
2711 return (*pos = len);
2712}
2713
2714static const struct file_operations qm_status_fops = {
2715 .owner = THIS_MODULE,
2716 .open = simple_open,
2717 .read = qm_status_read,
2718};
2719
85026525
LL
2720static int qm_debugfs_atomic64_set(void *data, u64 val)
2721{
2722 if (val)
2723 return -EINVAL;
2724
2725 atomic64_set((atomic64_t *)data, 0);
2726
2727 return 0;
2728}
2729
2730static int qm_debugfs_atomic64_get(void *data, u64 *val)
2731{
2732 *val = atomic64_read((atomic64_t *)data);
2733
2734 return 0;
2735}
2736
2737DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
2738 qm_debugfs_atomic64_set, "%llu\n");
2739
72c7a68d
ZW
2740/**
2741 * hisi_qm_debug_init() - Initialize qm related debugfs files.
2742 * @qm: The qm for which we want to add debugfs files.
2743 *
2744 * Create qm related debugfs files.
2745 */
2746int hisi_qm_debug_init(struct hisi_qm *qm)
2747{
85026525 2748 struct qm_dfx *dfx = &qm->debug.dfx;
4a97bfc7 2749 struct dentry *qm_d;
85026525 2750 void *data;
72c7a68d
ZW
2751 int i, ret;
2752
2753 qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
72c7a68d
ZW
2754 qm->debug.qm_d = qm_d;
2755
2756 /* only show this in PF */
2757 if (qm->fun_type == QM_HW_PF)
2758 for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
2759 if (qm_create_debugfs_file(qm, i)) {
2760 ret = -ENOENT;
2761 goto failed_to_create;
2762 }
2763
988453fb 2764 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
72c7a68d 2765
c31dc9fe
ST
2766 debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);
2767
0a3a3960
LL
2768 debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
2769 &qm_status_fops);
85026525
LL
2770 for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
2771 data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
2772 debugfs_create_file(qm_dfx_files[i].name,
2773 0644,
2774 qm_d,
2775 data,
2776 &qm_atomic64_ops);
2777 }
2778
72c7a68d
ZW
2779 return 0;
2780
2781failed_to_create:
2782 debugfs_remove_recursive(qm_d);
2783 return ret;
2784}
2785EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
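/*
 * Usage sketch (illustrative only; the debugfs directory name is
 * hypothetical): the caller provides the parent directory via
 * qm->debug.debug_root before calling hisi_qm_debug_init().
 */
static inline int example_setup_debugfs(struct hisi_qm *qm)
{
	qm->debug.debug_root = debugfs_create_dir("example_acc", NULL);

	return hisi_qm_debug_init(qm);
}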
2786
2787/**
2788 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
2789 * @qm: The qm whose debug registers we want to clear.
2790 */
2791void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
2792{
2793 struct qm_dfx_registers *regs;
2794 int i;
2795
2796 /* clear current_q */
2797 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
2798 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
2799
2800 /*
2801 * these registers are cleared on read, so read them here to clear
2802 * them.
2803 */
2804 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
2805
2806 regs = qm_dfx_regs;
2807 for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
2808 readl(qm->io_base + regs->reg_offset);
2809 regs++;
2810 }
2811
2812 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
2813}
2814EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
2815
eaebf4c3 2816static void qm_hw_error_init(struct hisi_qm *qm)
263c9959 2817{
eaebf4c3
ST
2818 const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
2819
263c9959 2820 if (!qm->ops->hw_error_init) {
ee1788c6 2821 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
263c9959
ZW
2822 return;
2823 }
2824
3176637a 2825 qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
eaebf4c3
ST
2826}
2827
2828static void qm_hw_error_uninit(struct hisi_qm *qm)
2829{
2830 if (!qm->ops->hw_error_uninit) {
2831 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
2832 return;
2833 }
2834
2835 qm->ops->hw_error_uninit(qm);
263c9959 2836}
263c9959 2837
dbdc1ec3 2838static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
263c9959
ZW
2839{
2840 if (!qm->ops->hw_error_handle) {
ee1788c6 2841 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
dbdc1ec3 2842 return ACC_ERR_NONE;
263c9959
ZW
2843 }
2844
2845 return qm->ops->hw_error_handle(qm);
2846}
263c9959 2847
eaebf4c3
ST
2848/**
2849 * hisi_qm_dev_err_init() - Initialize device error configuration.
2850 * @qm: The qm for which we want to do error initialization.
2851 *
2852 * Initialize QM and device error related configuration.
2853 */
2854void hisi_qm_dev_err_init(struct hisi_qm *qm)
2855{
2856 if (qm->fun_type == QM_HW_VF)
2857 return;
2858
2859 qm_hw_error_init(qm);
2860
2861 if (!qm->err_ini->hw_err_enable) {
2862 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
2863 return;
2864 }
2865 qm->err_ini->hw_err_enable(qm);
2866}
2867EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
2868
2869/**
2870 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
2871 * @qm: The qm for which we want to do error uninitialization.
2872 *
2873 * Uninitialize QM and device error related configuration.
2874 */
2875void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
2876{
2877 if (qm->fun_type == QM_HW_VF)
2878 return;
2879
2880 qm_hw_error_uninit(qm);
2881
2882 if (!qm->err_ini->hw_err_disable) {
2883 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
2884 return;
2885 }
2886 qm->err_ini->hw_err_disable(qm);
2887}
2888EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
2889
3f1ec97a
WQ
2890/**
2891 * hisi_qm_free_qps() - free multiple queue pairs.
2892 * @qps: The queue pairs need to be freed.
2893 * @qp_num: The num of queue pairs.
2894 */
2895void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
2896{
2897 int i;
2898
2899 if (!qps || qp_num <= 0)
2900 return;
2901
2902 for (i = qp_num - 1; i >= 0; i--)
2903 hisi_qm_release_qp(qps[i]);
2904}
2905EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
2906
2907static void free_list(struct list_head *head)
2908{
2909 struct hisi_qm_resource *res, *tmp;
2910
2911 list_for_each_entry_safe(res, tmp, head, list) {
2912 list_del(&res->list);
2913 kfree(res);
2914 }
2915}
2916
2917static int hisi_qm_sort_devices(int node, struct list_head *head,
2918 struct hisi_qm_list *qm_list)
2919{
2920 struct hisi_qm_resource *res, *tmp;
2921 struct hisi_qm *qm;
2922 struct list_head *n;
2923 struct device *dev;
2924 int dev_node = 0;
2925
2926 list_for_each_entry(qm, &qm_list->list, list) {
2927 dev = &qm->pdev->dev;
2928
2929 if (IS_ENABLED(CONFIG_NUMA)) {
2930 dev_node = dev_to_node(dev);
2931 if (dev_node < 0)
2932 dev_node = 0;
2933 }
2934
2935 res = kzalloc(sizeof(*res), GFP_KERNEL);
2936 if (!res)
2937 return -ENOMEM;
2938
2939 res->qm = qm;
2940 res->distance = node_distance(dev_node, node);
2941 n = head;
2942 list_for_each_entry(tmp, head, list) {
2943 if (res->distance < tmp->distance) {
2944 n = &tmp->list;
2945 break;
2946 }
2947 }
2948 list_add_tail(&res->list, n);
2949 }
2950
2951 return 0;
2952}
2953
2954/**
2955 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
2956 * @qm_list: The list of all available devices.
2957 * @qp_num: The number of queue pairs to be created.
2958 * @alg_type: The algorithm type.
2959 * @node: The numa node.
2960 * @qps: The queue pairs to be created.
2961 *
2962 * This function sorts all available devices according to NUMA distance, then
2963 * tries to create all queue pairs on one device. If no device meets the
2964 * requirements, an error is returned.
2965 */
2966int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
2967 u8 alg_type, int node, struct hisi_qp **qps)
2968{
2969 struct hisi_qm_resource *tmp;
2970 int ret = -ENODEV;
2971 LIST_HEAD(head);
2972 int i;
2973
2974 if (!qps || !qm_list || qp_num <= 0)
2975 return -EINVAL;
2976
2977 mutex_lock(&qm_list->lock);
2978 if (hisi_qm_sort_devices(node, &head, qm_list)) {
2979 mutex_unlock(&qm_list->lock);
2980 goto err;
2981 }
2982
2983 list_for_each_entry(tmp, &head, list) {
2984 for (i = 0; i < qp_num; i++) {
2985 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
2986 if (IS_ERR(qps[i])) {
2987 hisi_qm_free_qps(qps, i);
2988 break;
2989 }
2990 }
2991
2992 if (i == qp_num) {
2993 ret = 0;
2994 break;
2995 }
2996 }
2997
2998 mutex_unlock(&qm_list->lock);
2999 if (ret)
3000 pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
3001 node, alg_type, qp_num);
3002
3003err:
3004 free_list(&head);
3005 return ret;
3006}
3007EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
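/*
 * Usage sketch (illustrative only; the batch size is arbitrary): callers
 * allocate a batch of qps close to a NUMA node and release the whole batch
 * with hisi_qm_free_qps() when done.
 */
static inline int example_alloc_qps(struct hisi_qm_list *qm_list, u8 alg_type,
				    int node, struct hisi_qp **qps, int num)
{
	int ret;

	ret = hisi_qm_alloc_qps_node(qm_list, num, alg_type, node, qps);
	if (ret)
		return ret;

	/* ... submit work on qps[0..num - 1] ... */

	hisi_qm_free_qps(qps, num);
	return 0;
}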
3008
cd1b7ae3
ST
3009static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3010{
3011 u32 remain_q_num, q_num, i, j;
3012 u32 q_base = qm->qp_num;
3013 int ret;
3014
3015 if (!num_vfs)
3016 return -EINVAL;
3017
3018 remain_q_num = qm->ctrl_qp_num - qm->qp_num;
3019
3020 /* If the remaining queues are not enough, return error. */
3021 if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
3022 return -EINVAL;
3023
3024 q_num = remain_q_num / num_vfs;
3025 for (i = 1; i <= num_vfs; i++) {
3026 if (i == num_vfs)
3027 q_num += remain_q_num % num_vfs;
3028 ret = hisi_qm_set_vft(qm, i, q_base, q_num);
3029 if (ret) {
3030 for (j = i; j > 0; j--)
3031 hisi_qm_set_vft(qm, j, 0, 0);
3032 return ret;
3033 }
3034 q_base += q_num;
3035 }
3036
3037 return 0;
3038}
3039
3040static int qm_clear_vft_config(struct hisi_qm *qm)
3041{
3042 int ret;
3043 u32 i;
3044
3045 for (i = 1; i <= qm->vfs_num; i++) {
3046 ret = hisi_qm_set_vft(qm, i, 0, 0);
3047 if (ret)
3048 return ret;
3049 }
3050 qm->vfs_num = 0;
3051
3052 return 0;
3053}
3054
3055/**
3056 * hisi_qm_sriov_enable() - enable virtual functions
3057 * @pdev: the PCIe device
3058 * @max_vfs: the number of virtual functions to enable
3059 *
3060 * Returns the number of enabled VFs. If VFs are already enabled or max_vfs
3061 * is more than the total number of VFs the device can enable, it returns
3062 * failure.
3063 */
3064int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
3065{
3066 struct hisi_qm *qm = pci_get_drvdata(pdev);
3067 int pre_existing_vfs, num_vfs, total_vfs, ret;
3068
3069 total_vfs = pci_sriov_get_totalvfs(pdev);
3070 pre_existing_vfs = pci_num_vf(pdev);
3071 if (pre_existing_vfs) {
3072 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
3073 pre_existing_vfs);
3074 return 0;
3075 }
3076
3077 num_vfs = min_t(int, max_vfs, total_vfs);
3078 ret = qm_vf_q_assign(qm, num_vfs);
3079 if (ret) {
3080 pci_err(pdev, "Can't assign queues for VF!\n");
3081 return ret;
3082 }
3083
3084 qm->vfs_num = num_vfs;
3085
3086 ret = pci_enable_sriov(pdev, num_vfs);
3087 if (ret) {
3088 pci_err(pdev, "Can't enable VF!\n");
3089 qm_clear_vft_config(qm);
3090 return ret;
3091 }
3092
3093 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
3094
3095 return num_vfs;
3096}
3097EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
3098
3099/**
3100 * hisi_qm_sriov_disable - disable virtual functions
3101 * @pdev: the PCI device
3102 *
3103 * Return failure if there are VFs assigned already.
3104 */
3105int hisi_qm_sriov_disable(struct pci_dev *pdev)
3106{
3107 struct hisi_qm *qm = pci_get_drvdata(pdev);
3108
3109 if (pci_vfs_assigned(pdev)) {
3110 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
3111 return -EPERM;
3112 }
3113
3114 /* the remove callback in hpre_pci_driver will be called to free VF resources */
3115 pci_disable_sriov(pdev);
3116 return qm_clear_vft_config(qm);
3117}
3118EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
3119
3120/**
3121 * hisi_qm_sriov_configure - configure the number of VFs
3122 * @pdev: The PCI device
3123 * @num_vfs: The number of VFs to be enabled
3124 *
3125 * Enable SR-IOV according to num_vfs, 0 means disable.
3126 */
3127int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
3128{
3129 if (num_vfs == 0)
3130 return hisi_qm_sriov_disable(pdev);
3131 else
3132 return hisi_qm_sriov_enable(pdev, num_vfs);
3133}
3134EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
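/*
 * Usage sketch (illustrative only; "example_pci_driver" is hypothetical and
 * its other fields are elided): accelerator drivers typically plug this
 * helper straight into their struct pci_driver so that writing to the
 * sriov_numvfs sysfs attribute enables or disables VFs.
 */
static struct pci_driver example_pci_driver = {
	.name			= "example_acc",
	.sriov_configure	= hisi_qm_sriov_configure,
};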
3135
dbdc1ec3 3136static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
f826e6ef
ST
3137{
3138 u32 err_sts;
3139
3140 if (!qm->err_ini->get_dev_hw_err_status) {
3141 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
dbdc1ec3 3142 return ACC_ERR_NONE;
f826e6ef
ST
3143 }
3144
3145 /* get device hardware error status */
3146 err_sts = qm->err_ini->get_dev_hw_err_status(qm);
3147 if (err_sts) {
6c6dd580
ST
3148 if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
3149 qm->err_status.is_dev_ecc_mbit = true;
3150
f826e6ef
ST
3151 if (!qm->err_ini->log_dev_hw_err) {
3152 dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
dbdc1ec3 3153 return ACC_ERR_NEED_RESET;
f826e6ef
ST
3154 }
3155
3156 qm->err_ini->log_dev_hw_err(qm, err_sts);
dbdc1ec3 3157 return ACC_ERR_NEED_RESET;
f826e6ef
ST
3158 }
3159
dbdc1ec3 3160 return ACC_ERR_RECOVERED;
f826e6ef
ST
3161}
3162
dbdc1ec3 3163static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
f826e6ef 3164{
dbdc1ec3 3165 enum acc_err_result qm_ret, dev_ret;
f826e6ef
ST
3166
3167 /* log qm error */
3168 qm_ret = qm_hw_error_handle(qm);
3169
3170 /* log device error */
3171 dev_ret = qm_dev_err_handle(qm);
3172
dbdc1ec3
ST
3173 return (qm_ret == ACC_ERR_NEED_RESET ||
3174 dev_ret == ACC_ERR_NEED_RESET) ?
3175 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
f826e6ef
ST
3176}
3177
3178/**
3179 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
3180 * @pdev: The PCI device which needs to report errors.
3181 * @state: The connectivity between CPU and device.
3182 *
3183 * We register this function in the PCIe AER handlers. It reports device or
3184 * qm hardware error status when an error occurs.
3185 */
3186pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
3187 pci_channel_state_t state)
3188{
dbdc1ec3
ST
3189 struct hisi_qm *qm = pci_get_drvdata(pdev);
3190 enum acc_err_result ret;
3191
f826e6ef
ST
3192 if (pdev->is_virtfn)
3193 return PCI_ERS_RESULT_NONE;
3194
3195 pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
3196 if (state == pci_channel_io_perm_failure)
3197 return PCI_ERS_RESULT_DISCONNECT;
3198
dbdc1ec3
ST
3199 ret = qm_process_dev_error(qm);
3200 if (ret == ACC_ERR_NEED_RESET)
3201 return PCI_ERS_RESULT_NEED_RESET;
3202
3203 return PCI_ERS_RESULT_RECOVERED;
f826e6ef
ST
3204}
3205EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
3206
7ce396fa
ST
3207static int qm_get_hw_error_status(struct hisi_qm *qm)
3208{
3209 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
3210}
3211
6c6dd580
ST
3212static int qm_check_req_recv(struct hisi_qm *qm)
3213{
3214 struct pci_dev *pdev = qm->pdev;
3215 int ret;
3216 u32 val;
3217
3218 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
3219 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3220 (val == ACC_VENDOR_ID_VALUE),
3221 POLL_PERIOD, POLL_TIMEOUT);
3222 if (ret) {
3223 dev_err(&pdev->dev, "Fails to read QM reg!\n");
3224 return ret;
3225 }
3226
3227 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
3228 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3229 (val == PCI_VENDOR_ID_HUAWEI),
3230 POLL_PERIOD, POLL_TIMEOUT);
3231 if (ret)
3232 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
3233
3234 return ret;
3235}
3236
3237static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
3238{
3239 struct pci_dev *pdev = qm->pdev;
3240 u16 cmd;
3241 int i;
3242
3243 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3244 if (set)
3245 cmd |= PCI_COMMAND_MEMORY;
3246 else
3247 cmd &= ~PCI_COMMAND_MEMORY;
3248
3249 pci_write_config_word(pdev, PCI_COMMAND, cmd);
3250 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3251 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3252 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
3253 return 0;
3254
3255 udelay(1);
3256 }
3257
3258 return -ETIMEDOUT;
3259}
3260
3261static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
3262{
3263 struct pci_dev *pdev = qm->pdev;
3264 u16 sriov_ctrl;
3265 int pos;
3266 int i;
3267
3268 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
3269 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3270 if (set)
3271 sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
3272 else
3273 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
3274 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
3275
3276 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3277 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3278 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
3279 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
3280 return 0;
3281
3282 udelay(1);
3283 }
3284
3285 return -ETIMEDOUT;
3286}
3287
3288static int qm_set_msi(struct hisi_qm *qm, bool set)
3289{
3290 struct pci_dev *pdev = qm->pdev;
3291
3292 if (set) {
3293 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
3294 0);
3295 } else {
3296 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
3297 ACC_PEH_MSI_DISABLE);
3298 if (qm->err_status.is_qm_ecc_mbit ||
3299 qm->err_status.is_dev_ecc_mbit)
3300 return 0;
3301
3302 mdelay(1);
3303 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
3304 return -EFAULT;
3305 }
3306
3307 return 0;
3308}
3309
3310static int qm_vf_reset_prepare(struct hisi_qm *qm)
3311{
3312 struct hisi_qm_list *qm_list = qm->qm_list;
b67202e8 3313 int stop_reason = qm->status.stop_reason;
6c6dd580
ST
3314 struct pci_dev *pdev = qm->pdev;
3315 struct pci_dev *virtfn;
3316 struct hisi_qm *vf_qm;
3317 int ret = 0;
3318
3319 mutex_lock(&qm_list->lock);
3320 list_for_each_entry(vf_qm, &qm_list->list, list) {
3321 virtfn = vf_qm->pdev;
3322 if (virtfn == pdev)
3323 continue;
3324
3325 if (pci_physfn(virtfn) == pdev) {
b67202e8 3326 vf_qm->status.stop_reason = stop_reason;
6c6dd580
ST
3327 ret = hisi_qm_stop(vf_qm);
3328 if (ret)
3329 goto stop_fail;
3330 }
3331 }
3332
3333stop_fail:
3334 mutex_unlock(&qm_list->lock);
3335 return ret;
3336}
3337
3338static int qm_reset_prepare_ready(struct hisi_qm *qm)
3339{
3340 struct pci_dev *pdev = qm->pdev;
3341 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3342 int delay = 0;
3343
3344 /* All reset requests need to be queued for processing */
3345 while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
3346 msleep(++delay);
3347 if (delay > QM_RESET_WAIT_TIMEOUT)
3348 return -EBUSY;
3349 }
3350
3351 return 0;
3352}
3353
3354static int qm_controller_reset_prepare(struct hisi_qm *qm)
3355{
3356 struct pci_dev *pdev = qm->pdev;
3357 int ret;
3358
3359 ret = qm_reset_prepare_ready(qm);
3360 if (ret) {
3361 pci_err(pdev, "Controller reset not ready!\n");
3362 return ret;
3363 }
3364
3365 if (qm->vfs_num) {
3366 ret = qm_vf_reset_prepare(qm);
3367 if (ret) {
3368 pci_err(pdev, "Fails to stop VFs!\n");
3369 return ret;
3370 }
3371 }
3372
b67202e8 3373 qm->status.stop_reason = QM_SOFT_RESET;
6c6dd580
ST
3374 ret = hisi_qm_stop(qm);
3375 if (ret) {
3376 pci_err(pdev, "Fails to stop QM!\n");
3377 return ret;
3378 }
3379
3380 return 0;
3381}
3382
3383static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
3384{
3385 u32 nfe_enb = 0;
3386
3387 if (!qm->err_status.is_dev_ecc_mbit &&
3388 qm->err_status.is_qm_ecc_mbit &&
3389 qm->err_ini->close_axi_master_ooo) {
3390
3391 qm->err_ini->close_axi_master_ooo(qm);
3392
3393 } else if (qm->err_status.is_dev_ecc_mbit &&
3394 !qm->err_status.is_qm_ecc_mbit &&
3395 !qm->err_ini->close_axi_master_ooo) {
3396
3397 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
3398 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
3399 qm->io_base + QM_RAS_NFE_ENABLE);
3400 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
3401 }
3402}
3403
3404static int qm_soft_reset(struct hisi_qm *qm)
3405{
3406 struct pci_dev *pdev = qm->pdev;
3407 int ret;
3408 u32 val;
3409
3410 /* Ensure all doorbells and mailboxes received by QM */
3411 ret = qm_check_req_recv(qm);
3412 if (ret)
3413 return ret;
3414
3415 if (qm->vfs_num) {
3416 ret = qm_set_vf_mse(qm, false);
3417 if (ret) {
3418 pci_err(pdev, "Fails to disable vf MSE bit.\n");
3419 return ret;
3420 }
3421 }
3422
3423 ret = qm_set_msi(qm, false);
3424 if (ret) {
3425 pci_err(pdev, "Fails to disable PEH MSI bit.\n");
3426 return ret;
3427 }
3428
3429 qm_dev_ecc_mbit_handle(qm);
3430
3431 /* OOO register set and check */
3432 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
3433 qm->io_base + ACC_MASTER_GLOBAL_CTRL);
3434
3435 /* If bus lock, reset chip */
3436 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
3437 val,
3438 (val == ACC_MASTER_TRANS_RETURN_RW),
3439 POLL_PERIOD, POLL_TIMEOUT);
3440 if (ret) {
3441 pci_emerg(pdev, "Bus lock! Please reset system.\n");
3442 return ret;
3443 }
3444
3445 ret = qm_set_pf_mse(qm, false);
3446 if (ret) {
3447 pci_err(pdev, "Fails to disable pf MSE bit.\n");
3448 return ret;
3449 }
3450
3451 /* The reset related sub-control registers are not in PCI BAR */
3452 if (ACPI_HANDLE(&pdev->dev)) {
3453 unsigned long long value = 0;
3454 acpi_status s;
3455
3456 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
3457 qm->err_ini->err_info.acpi_rst,
3458 NULL, &value);
3459 if (ACPI_FAILURE(s)) {
3460 pci_err(pdev, "NO controller reset method!\n");
3461 return -EIO;
3462 }
3463
3464 if (value) {
3465 pci_err(pdev, "Reset step %llu failed!\n", value);
3466 return -EIO;
3467 }
3468 } else {
3469 pci_err(pdev, "No reset method!\n");
3470 return -EINVAL;
3471 }
3472
3473 return 0;
3474}
3475
3476static int qm_vf_reset_done(struct hisi_qm *qm)
3477{
3478 struct hisi_qm_list *qm_list = qm->qm_list;
3479 struct pci_dev *pdev = qm->pdev;
3480 struct pci_dev *virtfn;
3481 struct hisi_qm *vf_qm;
3482 int ret = 0;
3483
3484 mutex_lock(&qm_list->lock);
3485 list_for_each_entry(vf_qm, &qm_list->list, list) {
3486 virtfn = vf_qm->pdev;
3487 if (virtfn == pdev)
3488 continue;
3489
3490 if (pci_physfn(virtfn) == pdev) {
3491 ret = qm_restart(vf_qm);
3492 if (ret)
3493 goto restart_fail;
3494 }
3495 }
3496
3497restart_fail:
3498 mutex_unlock(&qm_list->lock);
3499 return ret;
3500}
3501
3502static int qm_get_dev_err_status(struct hisi_qm *qm)
3503{
7ce396fa 3504 return qm->err_ini->get_dev_hw_err_status(qm);
6c6dd580
ST
3505}
3506
3507static int qm_dev_hw_init(struct hisi_qm *qm)
3508{
3509 return qm->err_ini->hw_init(qm);
3510}
3511
3512static void qm_restart_prepare(struct hisi_qm *qm)
3513{
3514 u32 value;
3515
3516 if (!qm->err_status.is_qm_ecc_mbit &&
3517 !qm->err_status.is_dev_ecc_mbit)
3518 return;
3519
3520 /* temporarily close the OOO port used for PEH to write out MSI */
3521 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3522 writel(value & ~qm->err_ini->err_info.msi_wr_port,
3523 qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3524
3525 /* clear dev ecc 2bit error source if any */
7ce396fa
ST
3526 value = qm_get_dev_err_status(qm) &
3527 qm->err_ini->err_info.ecc_2bits_mask;
6c6dd580
ST
3528 if (value && qm->err_ini->clear_dev_hw_err_status)
3529 qm->err_ini->clear_dev_hw_err_status(qm, value);
3530
3531 /* clear QM ecc mbit error source */
3532 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
3533
3534 /* clear AM Reorder Buffer ecc mbit source */
3535 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
3536
3537 if (qm->err_ini->open_axi_master_ooo)
3538 qm->err_ini->open_axi_master_ooo(qm);
3539}
3540
3541static void qm_restart_done(struct hisi_qm *qm)
3542{
3543 u32 value;
3544
3545 if (!qm->err_status.is_qm_ecc_mbit &&
3546 !qm->err_status.is_dev_ecc_mbit)
3547 return;
3548
3549 /* open the OOO port for PEH to write out MSI */
3550 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3551 value |= qm->err_ini->err_info.msi_wr_port;
3552 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3553
3554 qm->err_status.is_qm_ecc_mbit = false;
3555 qm->err_status.is_dev_ecc_mbit = false;
3556}
3557
3558static int qm_controller_reset_done(struct hisi_qm *qm)
3559{
3560 struct pci_dev *pdev = qm->pdev;
3561 int ret;
3562
3563 ret = qm_set_msi(qm, true);
3564 if (ret) {
3565 pci_err(pdev, "Fails to enable PEH MSI bit!\n");
3566 return ret;
3567 }
3568
3569 ret = qm_set_pf_mse(qm, true);
3570 if (ret) {
3571 pci_err(pdev, "Fails to enable pf MSE bit!\n");
3572 return ret;
3573 }
3574
3575 if (qm->vfs_num) {
3576 ret = qm_set_vf_mse(qm, true);
3577 if (ret) {
3578 pci_err(pdev, "Fails to enable vf MSE bit!\n");
3579 return ret;
3580 }
3581 }
3582
3583 ret = qm_dev_hw_init(qm);
3584 if (ret) {
3585 pci_err(pdev, "Failed to init device\n");
3586 return ret;
3587 }
3588
3589 qm_restart_prepare(qm);
3590
3591 ret = qm_restart(qm);
3592 if (ret) {
3593 pci_err(pdev, "Failed to start QM!\n");
3594 return ret;
3595 }
3596
3597 if (qm->vfs_num) {
3598 ret = qm_vf_q_assign(qm, qm->vfs_num);
3599 if (ret) {
3600 pci_err(pdev, "Failed to assign queue!\n");
3601 return ret;
3602 }
3603 }
3604
3605 ret = qm_vf_reset_done(qm);
3606 if (ret) {
3607 pci_err(pdev, "Failed to start VFs!\n");
3608 return -EPERM;
3609 }
3610
3611 hisi_qm_dev_err_init(qm);
3612 qm_restart_done(qm);
3613
3614 clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
3615
3616 return 0;
3617}
3618
d0f6223c 3619static int qm_controller_reset(struct hisi_qm *qm)
6c6dd580
ST
3620{
3621 struct pci_dev *pdev = qm->pdev;
3622 int ret;
3623
3624 pci_info(pdev, "Controller resetting...\n");
3625
3626 ret = qm_controller_reset_prepare(qm);
3627 if (ret)
3628 return ret;
3629
3630 ret = qm_soft_reset(qm);
3631 if (ret) {
3632 pci_err(pdev, "Controller reset failed (%d)\n", ret);
3633 return ret;
3634 }
3635
3636 ret = qm_controller_reset_done(qm);
3637 if (ret)
3638 return ret;
3639
3640 pci_info(pdev, "Controller reset complete\n");
3641
3642 return 0;
3643}
3644
3645/**
3646 * hisi_qm_dev_slot_reset() - slot reset
3647 * @pdev: the PCIe device
3648 *
3649 * This function offers the QM-related PCIe device reset interface. Drivers
3650 * which use the QM can use it as slot_reset in their struct pci_error_handlers.
3651 */
3652pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
3653{
3654 struct hisi_qm *qm = pci_get_drvdata(pdev);
3655 int ret;
3656
3657 if (pdev->is_virtfn)
3658 return PCI_ERS_RESULT_RECOVERED;
3659
3660 pci_aer_clear_nonfatal_status(pdev);
3661
3662 /* reset pcie device controller */
3663 ret = qm_controller_reset(qm);
3664 if (ret) {
3665 pci_err(pdev, "Controller reset failed (%d)\n", ret);
3666 return PCI_ERS_RESULT_DISCONNECT;
3667 }
3668
3669 return PCI_ERS_RESULT_RECOVERED;
3670}
3671EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
3672
7ce396fa
ST
3673/* check the interrupt is ecc-mbit error or not */
3674static int qm_check_dev_error(struct hisi_qm *qm)
3675{
3676 int ret;
3677
3678 if (qm->fun_type == QM_HW_VF)
3679 return 0;
3680
3681 ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
3682 if (ret)
3683 return ret;
3684
3685 return (qm_get_dev_err_status(qm) &
3686 qm->err_ini->err_info.ecc_2bits_mask);
3687}
3688
3689void hisi_qm_reset_prepare(struct pci_dev *pdev)
3690{
3691 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3692 struct hisi_qm *qm = pci_get_drvdata(pdev);
3693 u32 delay = 0;
3694 int ret;
3695
3696 hisi_qm_dev_err_uninit(pf_qm);
3697
3698 /*
3699 * Check whether there is an ECC mbit error. If there is, wait for the
3700 * soft reset to fix it.
3701 */
3702 while (qm_check_dev_error(pf_qm)) {
3703 msleep(++delay);
3704 if (delay > QM_RESET_WAIT_TIMEOUT)
3705 return;
3706 }
3707
3708 ret = qm_reset_prepare_ready(qm);
3709 if (ret) {
3710 pci_err(pdev, "FLR not ready!\n");
3711 return;
3712 }
3713
3714 if (qm->vfs_num) {
3715 ret = qm_vf_reset_prepare(qm);
3716 if (ret) {
3717 pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
3718 ret);
3719 return;
3720 }
3721 }
3722
3723 ret = hisi_qm_stop(qm);
3724 if (ret) {
3725 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
3726 return;
3727 }
3728
3729 pci_info(pdev, "FLR resetting...\n");
3730}
3731EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
3732
3733static bool qm_flr_reset_complete(struct pci_dev *pdev)
3734{
3735 struct pci_dev *pf_pdev = pci_physfn(pdev);
3736 struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
3737 u32 id;
3738
3739 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
3740 if (id == QM_PCI_COMMAND_INVALID) {
3741 pci_err(pdev, "Device can not be used!\n");
3742 return false;
3743 }
3744
3745 clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
3746
3747 return true;
3748}
3749
3750void hisi_qm_reset_done(struct pci_dev *pdev)
3751{
3752 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3753 struct hisi_qm *qm = pci_get_drvdata(pdev);
3754 int ret;
3755
3756 hisi_qm_dev_err_init(pf_qm);
3757
3758 ret = qm_restart(qm);
3759 if (ret) {
3760 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
3761 goto flr_done;
3762 }
3763
3764 if (qm->fun_type == QM_HW_PF) {
3765 ret = qm_dev_hw_init(qm);
3766 if (ret) {
3767 pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
3768 goto flr_done;
3769 }
3770
3771 if (!qm->vfs_num)
3772 goto flr_done;
3773
3774 ret = qm_vf_q_assign(qm, qm->vfs_num);
3775 if (ret) {
3776 pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
3777 goto flr_done;
3778 }
3779
3780 ret = qm_vf_reset_done(qm);
3781 if (ret) {
3782 pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
3783 goto flr_done;
3784 }
3785 }
3786
3787flr_done:
3788 if (qm_flr_reset_complete(pdev))
3789 pci_info(pdev, "FLR reset complete\n");
3790}
3791EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
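/*
 * Usage sketch (illustrative only; "example_err_handler" is hypothetical):
 * the AER and FLR helpers above are meant to be wired into a driver's
 * struct pci_error_handlers.
 */
static const struct pci_error_handlers example_err_handler = {
	.error_detected	= hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};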
3792
dbdc1ec3
ST
3793static irqreturn_t qm_abnormal_irq(int irq, void *data)
3794{
3795 struct hisi_qm *qm = data;
3796 enum acc_err_result ret;
3797
85026525 3798 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
dbdc1ec3
ST
3799 ret = qm_process_dev_error(qm);
3800 if (ret == ACC_ERR_NEED_RESET)
3801 schedule_work(&qm->rst_work);
3802
3803 return IRQ_HANDLED;
3804}
3805
3806static int qm_irq_register(struct hisi_qm *qm)
3807{
3808 struct pci_dev *pdev = qm->pdev;
3809 int ret;
3810
3811 ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
3812 qm_irq, IRQF_SHARED, qm->dev_name, qm);
3813 if (ret)
3814 return ret;
3815
58ca0060 3816 if (qm->ver != QM_HW_V1) {
dbdc1ec3
ST
3817 ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
3818 qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
3819 if (ret)
3820 goto err_aeq_irq;
3821
3822 if (qm->fun_type == QM_HW_PF) {
3823 ret = request_irq(pci_irq_vector(pdev,
3824 QM_ABNORMAL_EVENT_IRQ_VECTOR),
3825 qm_abnormal_irq, IRQF_SHARED,
3826 qm->dev_name, qm);
3827 if (ret)
3828 goto err_abnormal_irq;
3829 }
3830 }
3831
3832 return 0;
3833
3834err_abnormal_irq:
3835 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
3836err_aeq_irq:
3837 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
3838 return ret;
3839}
3840
3841static void hisi_qm_controller_reset(struct work_struct *rst_work)
3842{
3843 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
3844 int ret;
3845
3846 /* reset pcie device controller */
3847 ret = qm_controller_reset(qm);
3848 if (ret)
3849 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
3850
3851}
3852
3853/**
3854 * hisi_qm_init() - Initialize configures about qm.
3855 * @qm: The qm needing init.
3856 *
3857 * This function init qm, then we can call hisi_qm_start to put qm into work.
3858 */
3859int hisi_qm_init(struct hisi_qm *qm)
3860{
3861 struct pci_dev *pdev = qm->pdev;
3862 struct device *dev = &pdev->dev;
3863 unsigned int num_vec;
3864 int ret;
3865
3866 hisi_qm_pre_init(qm);
3867
3868 ret = qm_alloc_uacce(qm);
3869 if (ret < 0)
3870 dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
3871
3872 ret = pci_enable_device_mem(pdev);
3873 if (ret < 0) {
3874 dev_err(&pdev->dev, "Failed to enable device mem!\n");
3875 goto err_remove_uacce;
3876 }
3877
3878 ret = pci_request_mem_regions(pdev, qm->dev_name);
3879 if (ret < 0) {
3880 dev_err(&pdev->dev, "Failed to request mem regions!\n");
3881 goto err_disable_pcidev;
3882 }
3883
3884 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
3885 qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
3886 qm->io_base = ioremap(qm->phys_base, qm->phys_size);
3887 if (!qm->io_base) {
3888 ret = -EIO;
3889 goto err_release_mem_regions;
3890 }
3891
3892 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3893 if (ret < 0)
3894 goto err_iounmap;
3895 pci_set_master(pdev);
3896
3897 if (!qm->ops->get_irq_num) {
3898 ret = -EOPNOTSUPP;
3899 goto err_iounmap;
3900 }
3901 num_vec = qm->ops->get_irq_num(qm);
3902 ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
3903 if (ret < 0) {
3904 dev_err(dev, "Failed to enable MSI vectors!\n");
3905 goto err_iounmap;
3906 }
3907
3908 ret = qm_irq_register(qm);
3909 if (ret)
3910 goto err_free_irq_vectors;
3911
58ca0060 3912 if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
dbdc1ec3
ST
3913 /* v2 starts to support get vft by mailbox */
3914 ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
3915 if (ret)
3916 goto err_irq_unregister;
3917 }
3918
3919 ret = hisi_qm_memory_init(qm);
3920 if (ret)
3921 goto err_irq_unregister;
3922
3923 INIT_WORK(&qm->work, qm_work_process);
3924 if (qm->fun_type == QM_HW_PF)
3925 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
3926
3927 atomic_set(&qm->status.flags, QM_INIT);
3928
3929 return 0;
3930
3931err_irq_unregister:
3932 qm_irq_unregister(qm);
3933err_free_irq_vectors:
3934 pci_free_irq_vectors(pdev);
3935err_iounmap:
3936 iounmap(qm->io_base);
3937err_release_mem_regions:
3938 pci_release_mem_regions(pdev);
3939err_disable_pcidev:
3940 pci_disable_device(pdev);
3941err_remove_uacce:
3942 uacce_remove(qm->uacce);
3943 qm->uacce = NULL;
3944 return ret;
3945}
3946EXPORT_SYMBOL_GPL(hisi_qm_init);
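/*
 * Usage sketch (illustrative only; device-specific setup such as qm->pdev,
 * qm->sqe_size, qm->qp_num and qm->dev_name is assumed to have been filled
 * in by the caller): the usual bring-up order is hisi_qm_init() followed by
 * hisi_qm_start(), with hisi_qm_uninit() on the failure path.
 */
static inline int example_bring_up_qm(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_init(qm);
	if (ret)
		return ret;

	ret = hisi_qm_start(qm);
	if (ret)
		hisi_qm_uninit(qm);

	return ret;
}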
3947
3948
263c9959
ZW
3949MODULE_LICENSE("GPL v2");
3950MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
3951MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");