// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"
static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);

/* Interrupt control bits */
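/*
 * Mask/unmask a single MSIX vector by updating its entry in the MSIX
 * permission table (8 bytes per vector at msix_perm_offset).
 */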
int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	union msix_perm perm;
	u32 offset;

	if (vec_id < 0 || vec_id >= msixcnt)
		return -EINVAL;

	offset = idxd->msix_perm_offset + vec_id * 8;
	perm.bits = ioread32(idxd->reg_base + offset);
	perm.ignore = 1;
	iowrite32(perm.bits, idxd->reg_base + offset);

	return 0;
}
void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i, rc;

	for (i = 0; i < msixcnt; i++) {
		rc = idxd_mask_msix_vector(idxd, i);
		if (rc < 0)
			dev_warn(&pdev->dev,
				 "Failed disabling msix vec %d\n", i);
	}
}
int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	union msix_perm perm;
	u32 offset;

	if (vec_id < 0 || vec_id >= msixcnt)
		return -EINVAL;

	offset = idxd->msix_perm_offset + vec_id * 8;
	perm.bits = ioread32(idxd->reg_base + offset);
	perm.ignore = 0;
	iowrite32(perm.bits, idxd->reg_base + offset);

	return 0;
}
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
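/*
 * Descriptor allocation helpers: hw_descs hold the DSA hardware descriptors,
 * descs hold the driver-side idxd_desc bookkeeping. Both arrays are sized to
 * wq->num_descs and allocated on the device's NUMA node.
 */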
static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}
static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}
static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}
static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}
/* WQ control bits */
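/*
 * Allocate the per-WQ descriptor resources for kernel-owned work queues:
 * hardware descriptors, a coherent DMA region of completion records, the
 * driver descriptors tying the two together, and the sbitmap used to track
 * which descriptors are in flight. num_descs is sized as the WQ size plus
 * the maximum descriptors the group's engines can hold.
 */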
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_group *group = wq->group;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq->size +
		idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
	wq->compls = dma_alloc_coherent(dev, wq->compls_size,
					&wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
			       dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		desc->completion = &wq->compls[i];
		desc->compl_dma = wq->compls_addr +
			sizeof(struct dsa_completion_record) * i;
		desc->id = i;
		desc->wq = wq;

		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}
void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_free(&wq->sbmap);
}
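/*
 * WQ enable/disable are issued through the device command register, so the
 * caller must hold idxd->dev_lock across the command submit and the wait
 * for completion status.
 */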
int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;
	int rc;

	lockdep_assert_held(&idxd->dev_lock);

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}
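/*
 * The DISABLE_WQ operand below packs a one-hot bit for the WQ within a
 * 16-WQ block (BIT(wq->id % 16)) together with the block index shifted
 * into the upper half ((wq->id / 16) << 16).
 */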
int idxd_wq_disable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}
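/*
 * Each WQ submits work through its own portal window in the WQ BAR; map the
 * IDXD_PORTAL_SIZE region at offset wq->id * IDXD_PORTAL_SIZE for this WQ.
 */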
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start = start + wq->id * IDXD_PORTAL_SIZE;

	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->dportal)
		return -ENOMEM;
	dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

	return 0;
}
void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->dportal);
}
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;

	return false;
}
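/*
 * Device commands are issued by writing the command register and then
 * polling CMDSTS until the active bit clears. idxd_cmd_wait() spins up to
 * 'timeout' iterations and returns the final status through *status.
 */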
static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
{
	u32 sts, to = timeout;

	lockdep_assert_held(&idxd->dev_lock);
	sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	while (sts & IDXD_CMDSTS_ACTIVE && --to) {
		cpu_relax();
		sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	}

	if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
		dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
		*status = 0;
		return -EBUSY;
	}

	*status = sts;
	return 0;
}
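/*
 * Wait out any command that is still active, then program the command code
 * and operand and kick off the new command with a single 32-bit write.
 */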
static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
{
	union idxd_command_reg cmd;
	int rc;
	u32 status;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	return 0;
}
int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	u32 status;

	lockdep_assert_held(&idxd->dev_lock);
	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	/* Treat success or an already-enabled device as success */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}
int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	u32 status;

	lockdep_assert_held(&idxd->dev_lock);
	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	/* Treat success or an already-disabled device as success */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_CONF_READY;
	return 0;
}
int __idxd_device_reset(struct idxd_device *idxd)
{
	u32 status;
	int rc;

	rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
	if (rc < 0)
		return rc;
	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
	if (rc < 0)
		return rc;

	return 0;
}
int idxd_device_reset(struct idxd_device *idxd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = __idxd_device_reset(idxd);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	return rc;
}
/* Device configuration bits */
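/*
 * Each group owns a 64-byte GRPCFG block: four 64-bit WQ bitmap words at
 * offset 0, the engine bitmap at offset 32, and the flags word at offset 40.
 * Write all three pieces and read them back for the debug log.
 */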
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < 4; i++) {
		grpcfg_offset = idxd->grpcfg_offset +
			group->id * 64 + i * sizeof(u64);
		iowrite64(group->grpcfg.wqs[i],
			  idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}
static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}
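/*
 * Program one WQ's 32-byte WQCFG block: size, threshold, privilege,
 * priority, and the max transfer/batch shifts from GENCAP, written out as
 * eight 32-bit words.
 */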
static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	memset(&wq->wqcfg, 0, sizeof(union wqcfg));

	/* bytes 0-3 */
	wq->wqcfg.wq_size = wq->size;

	if (wq->size == 0) {
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg.wq_thresh = wq->threshold;

	/* bytes 8-11 */
	wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
	wq->wqcfg.mode = 1;

	wq->wqcfg.priority = wq->priority;

	/* bytes 12-15 */
	wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
	wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < 8; i++) {
		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
		iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}
static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}
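/*
 * Fill in each group's GRPFLAGS from the group's configured values, falling
 * back to the traffic class defaults (TC-A 0, TC-B 1) and to the device-wide
 * max_tokens when no per-group token allowance is set.
 */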
static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}
static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = &idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}
static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = &idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = &idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (!wq_dedicated(wq)) {
			dev_warn(dev, "No shared workqueue support.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0)
		return -EINVAL;

	return 0;
}
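/*
 * Top-level configuration: build the WQ and engine group maps, derive the
 * group flags, then write the WQ and group configuration registers. The
 * caller holds idxd->dev_lock.
 */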
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}