// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);

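/*
 * Note: device commands follow a send-then-poll pattern. idxd_cmd_send()
 * writes the command register and idxd_cmd_wait() polls CMDSTS until the
 * active bit clears; both expect idxd->dev_lock to be held so that
 * commands cannot interleave. See the definitions further down.
 */
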
/* Interrupt control bits */
int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
        struct pci_dev *pdev = idxd->pdev;
        int msixcnt = pci_msix_vec_count(pdev);
        union msix_perm perm;
        u32 offset;

        if (vec_id < 0 || vec_id >= msixcnt)
                return -EINVAL;

        offset = idxd->msix_perm_offset + vec_id * 8;
        perm.bits = ioread32(idxd->reg_base + offset);
        perm.ignore = 1;
        iowrite32(perm.bits, idxd->reg_base + offset);

        return 0;
}

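/*
 * The vec_id * 8 stride above reflects the 8-byte MSIX permission table
 * entries; masking a vector sets the entry's ignore bit so the device
 * stops generating interrupts on that vector.
 */
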
void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        int msixcnt = pci_msix_vec_count(pdev);
        int i, rc;

        for (i = 0; i < msixcnt; i++) {
                rc = idxd_mask_msix_vector(idxd, i);
                if (rc < 0)
                        dev_warn(&pdev->dev,
                                 "Failed disabling msix vec %d\n", i);
        }
}

int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
        struct pci_dev *pdev = idxd->pdev;
        int msixcnt = pci_msix_vec_count(pdev);
        union msix_perm perm;
        u32 offset;

        if (vec_id < 0 || vec_id >= msixcnt)
                return -EINVAL;

        offset = idxd->msix_perm_offset + vec_id * 8;
        perm.bits = ioread32(idxd->reg_base + offset);
        perm.ignore = 0;
        iowrite32(perm.bits, idxd->reg_base + offset);

        /*
         * A readback from the device ensures that any previously generated
         * completion record writes are visible to software based on PCI
         * ordering rules.
         */
        perm.bits = ioread32(idxd->reg_base + offset);

        return 0;
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 1;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 0;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->hw_descs[i]);

        kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
                                    GFP_KERNEL, node);
        if (!wq->hw_descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
                                               GFP_KERNEL, node);
                if (!wq->hw_descs[i]) {
                        free_hw_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

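/*
 * Both this helper and alloc_descs() below allocate from the NUMA node
 * returned by dev_to_node(dev), keeping descriptor memory local to the
 * device.
 */
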
static void free_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->descs[i]);

        kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
                                 GFP_KERNEL, node);
        if (!wq->descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
                                            GFP_KERNEL, node);
                if (!wq->descs[i]) {
                        free_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_group *group = wq->group;
        struct device *dev = &idxd->pdev->dev;
        int rc, num_descs, i;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        num_descs = wq->size +
                idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
        wq->num_descs = num_descs;

        rc = alloc_hw_descs(wq, num_descs);
        if (rc < 0)
                return rc;

        wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
        wq->compls = dma_alloc_coherent(dev, wq->compls_size,
                                        &wq->compls_addr, GFP_KERNEL);
        if (!wq->compls) {
                rc = -ENOMEM;
                goto fail_alloc_compls;
        }

        rc = alloc_descs(wq, num_descs);
        if (rc < 0)
                goto fail_alloc_descs;

        rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
                               dev_to_node(dev));
        if (rc < 0)
                goto fail_sbitmap_init;

        for (i = 0; i < num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                desc->hw = wq->hw_descs[i];
                desc->completion = &wq->compls[i];
                desc->compl_dma = wq->compls_addr +
                        sizeof(struct dsa_completion_record) * i;
                desc->id = i;
                desc->wq = wq;

                dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
                desc->txd.tx_submit = idxd_dma_tx_submit;
        }

        return 0;

 fail_sbitmap_init:
        free_descs(wq);
 fail_alloc_descs:
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
        free_hw_descs(wq);
        return rc;
}

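/*
 * The arrays set up above pair 1:1: descriptor i submits through
 * hw_descs[i] and completes into compls[i], whose bus address compl_dma
 * sits i * sizeof(struct dsa_completion_record) into the coherent
 * allocation; the sbitmap hands out free descriptor indices.
 */
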
void idxd_wq_free_resources(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_hw_descs(wq);
        free_descs(wq);
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
        sbitmap_free(&wq->sbmap);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status;
        int rc;

        lockdep_assert_held(&idxd->dev_lock);

        if (wq->state == IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d already enabled\n", wq->id);
                return -ENXIO;
        }

        rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
                dev_dbg(dev, "WQ enable failed: %#x\n", status);
                return -ENXIO;
        }

        wq->state = IDXD_WQ_ENABLED;
        dev_dbg(dev, "WQ %d enabled\n", wq->id);
        return 0;
}

int idxd_wq_disable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status, operand;
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        dev_dbg(dev, "Disabling WQ %d\n", wq->id);

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return 0;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        if (status != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "WQ disable failed: %#x\n", status);
                return -ENXIO;
        }

        wq->state = IDXD_WQ_DISABLED;
        dev_dbg(dev, "WQ %d disabled\n", wq->id);
        return 0;
}

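/*
 * Note the operand encoding for WQ disable: BIT(wq->id % 16) selects a
 * WQ within a block of 16 and (wq->id / 16) << 16 carries the block
 * index. (This reading of the hardware operand field is inferred from
 * the encoding above, not from the spec.)
 */
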
int idxd_wq_map_portal(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        resource_size_t start;

        start = pci_resource_start(pdev, IDXD_WQ_BAR);
        start = start + wq->id * IDXD_PORTAL_SIZE;

        wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
        if (!wq->dportal)
                return -ENOMEM;
        dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);

        return 0;
}

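/*
 * WQ portals live in their own BAR (IDXD_WQ_BAR); each WQ owns an
 * IDXD_PORTAL_SIZE window at wq->id * IDXD_PORTAL_SIZE, and descriptor
 * submission writes go through this mapping.
 */
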
void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        devm_iounmap(dev, wq->dportal);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
                return true;
        return false;
}

static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
{
        u32 sts, to = timeout;

        lockdep_assert_held(&idxd->dev_lock);
        sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        while (sts & IDXD_CMDSTS_ACTIVE && --to) {
                cpu_relax();
                sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        }

        if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
                dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
                *status = 0;
                return -EBUSY;
        }

        *status = sts;
        return 0;
}

static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
{
        union idxd_command_reg cmd;
        int rc;
        u32 status;

        lockdep_assert_held(&idxd->dev_lock);
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = cmd_code;
        cmd.operand = operand;
        dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
                __func__, cmd_code, operand);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        return 0;
}

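/*
 * A typical caller sequence, with dev_lock already held (a sketch of the
 * pattern used by the enable/disable paths below):
 *
 *        rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
 *        if (rc < 0)
 *                return rc;
 *        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
 *
 * idxd_cmd_send() itself waits for any prior command to drain before
 * writing the command register, so back-to-back commands serialize.
 */
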
int idxd_device_enable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int rc;
        u32 status;

        lockdep_assert_held(&idxd->dev_lock);
        if (idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device already enabled\n");
                return -ENXIO;
        }

        rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        /* If the command is successful or if the device was enabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd->state = IDXD_DEV_ENABLED;
        return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int rc;
        u32 status;

        lockdep_assert_held(&idxd->dev_lock);
        if (!idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device is not enabled\n");
                return 0;
        }

        rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        /* If the command is successful or if the device was disabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd->state = IDXD_DEV_CONF_READY;
        return 0;
}

int __idxd_device_reset(struct idxd_device *idxd)
{
        u32 status;
        int rc;

        rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
        if (rc < 0)
                return rc;
        rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
        if (rc < 0)
                return rc;

        return 0;
}

int idxd_device_reset(struct idxd_device *idxd)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&idxd->dev_lock, flags);
        rc = __idxd_device_reset(idxd);
        spin_unlock_irqrestore(&idxd->dev_lock, flags);
        return rc;
}

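/*
 * __idxd_device_reset() is the raw worker for callers that already hold
 * dev_lock; idxd_device_reset() is the self-locking wrapper around it.
 */
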
/* Device configuration bits */
static void idxd_group_config_write(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i;
        u32 grpcfg_offset;

        dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

        /* setup GRPWQCFG */
        for (i = 0; i < 4; i++) {
                grpcfg_offset = idxd->grpcfg_offset +
                        group->id * 64 + i * sizeof(u64);
                iowrite64(group->grpcfg.wqs[i],
                          idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset,
                        ioread64(idxd->reg_base + grpcfg_offset));
        }

        /* setup GRPENGCFG */
        grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
        iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

        /* setup GRPFLAGS */
        grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
        iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
                group->id, grpcfg_offset,
                ioread32(idxd->reg_base + grpcfg_offset));
}

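/*
 * Per the offsets used above, each group's config block spans 64 bytes:
 * four 8-byte GRPWQCFG words at +0, GRPENGCFG at +32 and GRPFLAGS at
 * +40, all relative to idxd->grpcfg_offset + group->id * 64.
 */
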
static int idxd_groups_config_write(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i;
        struct device *dev = &idxd->pdev->dev;

        /* Setup bandwidth token limit */
        if (idxd->token_limit) {
                reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
                reg.token_limit = idxd->token_limit;
                iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
        }

        dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
                ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = &idxd->groups[i];

                idxd_group_config_write(group);
        }

        return 0;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 wq_offset;
        int i;

        if (!wq->group)
                return 0;

        memset(&wq->wqcfg, 0, sizeof(union wqcfg));

        /* byte 0-3 */
        wq->wqcfg.wq_size = wq->size;

        if (wq->size == 0) {
                dev_warn(dev, "Incorrect work queue size: 0\n");
                return -EINVAL;
        }

        /* bytes 4-7 */
        wq->wqcfg.wq_thresh = wq->threshold;

        /* byte 8-11 */
        wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
        wq->wqcfg.mode = 1;

        wq->wqcfg.priority = wq->priority;

        /* bytes 12-15 */
        wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
        wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;

        dev_dbg(dev, "WQ %d CFGs\n", wq->id);
        for (i = 0; i < 8; i++) {
                wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
                iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
                        wq->id, i, wq_offset,
                        ioread32(idxd->reg_base + wq_offset));
        }

        return 0;
}

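/*
 * The WQCFG image is 32 bytes (eight u32 words) per WQ, written at
 * idxd->wqcfg_offset + wq->id * 32; the dev_dbg readbacks echo what the
 * device actually latched.
 */
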
static int idxd_wqs_config_write(struct idxd_device *idxd)
{
        int i, rc;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = &idxd->wqs[i];

                rc = idxd_wq_config_write(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
        int i;

        /* TC-A 0 and TC-B 1 should be defaults */
        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = &idxd->groups[i];

                if (group->tc_a == -1)
                        group->tc_a = group->grpcfg.flags.tc_a = 0;
                else
                        group->grpcfg.flags.tc_a = group->tc_a;
                if (group->tc_b == -1)
                        group->tc_b = group->grpcfg.flags.tc_b = 1;
                else
                        group->grpcfg.flags.tc_b = group->tc_b;
                group->grpcfg.flags.use_token_limit = group->use_token_limit;
                group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
                if (group->tokens_allowed)
                        group->grpcfg.flags.tokens_allowed =
                                group->tokens_allowed;
                else
                        group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
        }
}

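/*
 * Groups that do not request an explicit tokens_allowed value fall back
 * to idxd->max_tokens, i.e. the entire bandwidth-token pool remains
 * available to them.
 */
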
static int idxd_engines_setup(struct idxd_device *idxd)
{
        int i, engines = 0;
        struct idxd_engine *eng;
        struct idxd_group *group;

        for (i = 0; i < idxd->max_groups; i++) {
                group = &idxd->groups[i];
                group->grpcfg.engines = 0;
        }

        for (i = 0; i < idxd->max_engines; i++) {
                eng = &idxd->engines[i];
                group = eng->group;

                if (!group)
                        continue;

                group->grpcfg.engines |= BIT(eng->id);
                engines++;
        }

        if (!engines)
                return -EINVAL;

        return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
        struct idxd_wq *wq;
        struct idxd_group *group;
        int i, j, configured = 0;
        struct device *dev = &idxd->pdev->dev;

        for (i = 0; i < idxd->max_groups; i++) {
                group = &idxd->groups[i];
                for (j = 0; j < 4; j++)
                        group->grpcfg.wqs[j] = 0;
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = &idxd->wqs[i];
                group = wq->group;

                if (!wq->group)
                        continue;
                if (!wq->size)
                        continue;

                if (!wq_dedicated(wq)) {
                        dev_warn(dev, "No shared workqueue support.\n");
                        return -EINVAL;
                }

                group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
                configured++;
        }

        if (configured == 0)
                return -EINVAL;

        return 0;
}

int idxd_device_config(struct idxd_device *idxd)
{
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        rc = idxd_wqs_setup(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_engines_setup(idxd);
        if (rc < 0)
                return rc;

        idxd_group_flags_setup(idxd);

        rc = idxd_wqs_config_write(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_groups_config_write(idxd);
        return rc;
}

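/*
 * Configuration order matters: WQ and engine group memberships are
 * computed first, then group flags, and only then are the WQ and group
 * register images written to the device, with dev_lock held throughout
 * (lockdep-asserted above).
 */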
);