// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"

#include "rvu_trace.h"
#include "rvu_npc_hash.h"

#define DRV_NAME	"rvu_af"
#define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *));
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;
	hw->cap.nix_rx_multicast = true;
	hw->cap.nix_shaper_toggle_wait = false;
	hw->cap.npc_hash_extract = false;
	hw->cap.npc_exact_match_enabled = false;

	if (is_rvu_pre_96xx_C0(rvu)) {
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
			hw->cap.nix_rx_multicast = false;
	}
	if (!is_rvu_pre_96xx_C0(rvu))
		hw->cap.nix_shaper_toggle_wait = true;

	if (!is_rvu_otx2(rvu))
		hw->cap.per_pf_mbox_regs = true;

	if (is_rvu_npc_hash_extract_en(rvu))
		hw->cap.npc_hash_extract = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
 * or 'nonzero' at bits specified by 'mask'
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	bool twice = false;
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
again:
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
		return 0;
	if (!zero && (reg_val & mask))
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		goto again;
	}
	/* In scenarios where CPU is scheduled out before checking
	 * 'time_before' (above) and gets scheduled in such that
	 * jiffies are beyond timeout value, then check again if HW is
	 * done with the operation in the meantime.
	 */
	if (!twice) {
		twice = true;
		goto again;
	}
	return -EBUSY;
}
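
/* Illustration (not driver code): callers pair a register write that kicks
 * off a HW operation with rvu_poll_reg() on its completion bit. E.g. the LF
 * reset path below does:
 *
 *	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
 *	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg,
 *			   BIT_ULL(12), true);
 *
 * 'zero == true' waits until (reg & mask) reads as zero, 'zero == false'
 * waits until any bit in 'mask' is set; -EBUSY means HW never responded.
 */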
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return false;

	return !test_bit(id, rsrc->bmap);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}

void rvu_free_bitmap(struct rsrc_bmap *rsrc)
{
	kfree(rsrc->bmap);
}
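
/* Illustration (not driver code): the typical rsrc_bmap life cycle built
 * from the helpers above. 'max' must be set before allocating the bitmap:
 *
 *	struct rsrc_bmap lf = { .max = 128 };
 *	int id;
 *
 *	if (rvu_alloc_bitmap(&lf))
 *		return -ENOMEM;
 *	id = rvu_alloc_rsrc(&lf);	// first free bit, or -ENOSPC
 *	...
 *	rvu_free_rsrc(&lf, id);		// release a single resource
 *	rvu_free_bitmap(&lf);		// free the bitmap itself
 */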
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}
/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * For a pcifunc if LFs are attached from multiple blocks of same type, then
 * return blkaddr of first encountered block.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
	 * 'BLKADDR_NIX1'.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			RVU_PRIV_HWVFX_NIXX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}
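
/* Note (illustrative): for single-instance block types the mapping above is
 * fixed, e.g. rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0) simply yields BLKADDR_NPA.
 * Only NIX and CPT exist in two instances, so only for those does a nonzero
 * 'pcifunc' matter: the per-PF/HWVF LF count registers decide whether the
 * function's LFs live on NIX0/CPT0 or NIX1/CPT1.
 */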
static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				int lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	if (numvfs)
		*numvfs = (cfg >> 12) & 0xFF;
	if (hwvf)
		*hwvf = cfg & 0xFFF;
}

int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}
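
/* Note (illustrative): 'pcifunc' packs the PF index and the function number
 * into one u16 using the RVU_PFVF_* masks/shifts from rvu.h. FUNC is 0 for
 * the PF itself and 'VF index + 1' for its VFs, which is why rvu_get_hwvf()
 * subtracts one above:
 *
 *	pf   = (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
 *	func = pcifunc & RVU_PFVF_FUNC_MASK;	// 0 == PF, n == VF(n - 1)
 */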
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}

static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}

static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];
	int err;

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
	if (err) {
		dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
		while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
			;
	}
}
static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}
static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}
static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}
static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */
		if (!pf)
			goto lbkvf;
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MAC address to VFs */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}
static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct btw kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}
static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}
static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}
static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_LBK, pdev);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	lbk_const = readq(base + LBK_CONST);

	/* cache fifo size */
	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

	iounmap(base);
err_put:
	pci_dev_put(pdev);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NPA LF bitmap\n", __func__);
		return err;
	}

nix:
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
		return err;
	}

	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
		return err;
	}

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSO LF bitmap\n", __func__);
		return err;
	}

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
		return err;
	}

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate TIM LF bitmap\n", __func__);
		return err;
	}

cpt:
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
		return err;
	}
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
		return err;
	}

	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	mutex_init(&rvu->rsrc_lock);

	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to setup MSIX resources\n", __func__);
		return err;
	}

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
		goto npc_err;
	}

	err = rvu_cgx_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
		goto cgx_err;
	}

	err = rvu_npc_exact_init(rvu);
	if (err) {
		dev_err(rvu->dev, "failed to initialize exact match table\n");
		return err;
	}

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
		goto npa_err;
	}

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
		goto nix_err;
	}

	err = rvu_sdp_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
		goto nix_err;
	}

	rvu_program_channels(rvu);

	err = rvu_mcs_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
		goto nix_err;
	}

	err = rvu_cpt_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
		goto mcs_err;
	}

	return 0;

mcs_err:
	rvu_mcs_exit(rvu);
nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}
/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions i.e AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}
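
/* Illustration (not driver code): rough shape of an AQ setup as done by the
 * NPA/NIX AQ init paths; queue size and entry structs depend on the block:
 *
 *	struct admin_queue *aq;
 *	int err;
 *
 *	err = rvu_aq_alloc(rvu, &aq, qsize,
 *			   sizeof(struct npa_aq_inst_s),
 *			   sizeof(struct npa_aq_res_s));
 *	if (err)
 *		return err;
 *	...
 *	rvu_aq_free(rvu, aq);
 */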
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
{
	if (rvu->fwdata) {
		rsp->rclk_freq = rvu->fwdata->rclk;
		rsp->sclk_freq = rvu->fwdata->sclk;
	}
	return 0;
}
/* Get current count of a RVU block's LF/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
	switch (blkaddr) {
	case BLKADDR_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return pfvf->nixlf ? 1 : 0;
	case BLKADDR_SSO:
		return pfvf->sso;
	case BLKADDR_SSOW:
		return pfvf->ssow;
	case BLKADDR_TIM:
		return pfvf->timlfs;
	case BLKADDR_CPT0:
		return pfvf->cptlfs;
	case BLKADDR_CPT1:
		return pfvf->cpt1_lfs;
	}
	return 0;
}

/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return !!pfvf->sso;
	case BLKTYPE_SSOW:
		return !!pfvf->ssow;
	case BLKTYPE_TIM:
		return !!pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs || pfvf->cpt1_lfs;
	}

	return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf;

	if (!is_pf_func_valid(rvu, pcifunc))
		return false;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Check if this PFFUNC has a LF of type blktype attached */
	if (!is_blktype_attached(pfvf, blktype))
		return false;

	return true;
}
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);
	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}
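
/* Note: layout of 'lookup_reg' as used above (derived from the accesses in
 * rvu_lookup_rsrc() itself): the pcifunc goes in bits [24+], the slot in
 * bits [16+], bit 13 triggers the lookup and is cleared by HW on completion,
 * bit 12 reports whether a matching LF exists, and bits [11:0] hold the LF
 * number on success.
 */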
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
			      u16 global_slot, u16 *slot_in_block)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int numlfs, total_lfs = 0, nr_blocks = 0;
	int i, num_blkaddr[BLK_COUNT] = { 0 };
	struct rvu_block *block;
	int blkaddr;
	u16 start_slot;

	if (!is_blktype_attached(pfvf, blktype))
		return -ENODEV;

	/* Get all the block addresses from which LFs are attached to
	 * the given pcifunc in num_blkaddr[].
	 */
	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
		block = &rvu->hw->block[blkaddr];
		if (block->type != blktype)
			continue;
		if (!is_block_implemented(rvu->hw, blkaddr))
			continue;

		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
		if (numlfs) {
			total_lfs += numlfs;
			num_blkaddr[nr_blocks] = blkaddr;
			nr_blocks++;
		}
	}

	if (global_slot >= total_lfs)
		return -ENODEV;

	/* Based on the given global slot number retrieve the
	 * correct block address out of all attached block
	 * addresses and slot number in that block.
	 */
	total_lfs = 0;
	blkaddr = -ENODEV;
	for (i = 0; i < nr_blocks; i++) {
		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
		total_lfs += numlfs;
		if (global_slot < total_lfs) {
			blkaddr = num_blkaddr[i];
			start_slot = total_lfs - numlfs;
			*slot_in_block = global_slot - start_slot;
			break;
		}
	}

	return blkaddr;
}
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
				      struct rsrc_detach *detach,
				      struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr = BLKADDR_NIX0, vf;
	struct rvu_pfvf *pf;

	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	/* All CGX mapped PFs are set with assigned NIX block during init */
	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		blkaddr = pf->nix_blkaddr;
	} else if (is_afvf(pcifunc)) {
		vf = pcifunc - 1;
		/* Assign NIX based on VF number. All even numbered VFs get
		 * NIX0 and odd numbered get NIX1
		 */
		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
		/* NIX1 is not present on all silicons */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			blkaddr = BLKADDR_NIX0;
	}

	/* if SDP1 then the blkaddr is NIX1 */
	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
		blkaddr = BLKADDR_NIX1;

	switch (blkaddr) {
	case BLKADDR_NIX1:
		pfvf->nix_blkaddr = BLKADDR_NIX1;
		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
		break;
	case BLKADDR_NIX0:
	default:
		pfvf->nix_blkaddr = BLKADDR_NIX0;
		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
		break;
	}

	return pfvf->nix_blkaddr;
}
static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
				  u16 pcifunc, struct rsrc_attach *attach)
{
	int blkaddr;

	switch (blktype) {
	case BLKTYPE_NIX:
		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
		break;
	case BLKTYPE_CPT:
		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
			return rvu_get_blkaddr(rvu, blktype, 0);
		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
			  BLKADDR_CPT0;
		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
			return -ENODEV;
		break;
	default:
		return rvu_get_blkaddr(rvu, blktype, 0);
	}

	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;

	return -ENODEV;
}
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int free_lfs, mappedlfs, blkaddr;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;

	/* Only one NPA LF can be attached */
	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}
static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
				       struct rsrc_attach *attach)
{
	int blkaddr, num_lfs;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
					 attach->hdr.pcifunc, attach);
	if (blkaddr < 0)
		return false;

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
					blkaddr);
	/* Requester already has LFs from given block ? */
	return !!num_lfs;
}
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees as slot 0,1,2. So for a 'modify'
		 * request, simply detach all existing attached LFs/slots
		 * and attach afresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}
static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (lf < 0)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}
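
/* Note: 'msix_lfmap[vec]' records which (block, LF) pair owns MSIX vector
 * 'vec' of this PF/VF, encoded via MSIX_BLKLF() from rvu.h. The reverse
 * lookup above is therefore a linear scan of at most 'msix.max' entries;
 * vectors never assigned to an LF keep the value 0 and never match.
 */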
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot, blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	/* Get BLKADDR from which LFs are attached to pcifunc */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
	} else {
		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
	}

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	rsp->cpt1_lfs = pfvf->cpt1_lfs;
	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
		rsp->cpt1_lf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
	}

	return 0;
}
int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
				   struct free_rsrcs_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	mutex_lock(&rvu->rsrc_lock);

	block = &hw->block[BLKADDR_NPA];
	rsp->npa = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX0];
	rsp->nix = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX1];
	rsp->nix1 = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSO];
	rsp->sso = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSOW];
	rsp->ssow = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_TIM];
	rsp->tim = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT0];
	rsp->cpt = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT1];
	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);

	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
		/* NIX1 */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
	} else {
		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);

		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;

		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);
	}

	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
out:
	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
			    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u16 vf, numvfs;
	u64 cfg;

	vf = pcifunc & RVU_PFVF_FUNC_MASK;
	cfg = rvu_read64(rvu, BLKADDR_RVUM,
			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
	numvfs = (cfg >> 12) & 0xFF;

	if (vf && vf <= numvfs)
		__rvu_flr_handler(rvu, pcifunc);
	else
		return RVU_INVALID_VF_ID;

	return 0;
}
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
				struct get_hw_cap_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;

	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
	rsp->nix_shaping = hw->cap.nix_shaping;
	rsp->npc_hash_extract = hw->cap.npc_hash_extract;

	return 0;
}
int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, nixlf;
	u16 target;

	/* Only PF can add VF permissions */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
		return -EOPNOTSUPP;

	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
	pfvf = rvu_get_pfvf(rvu, target);

	if (req->flags & RESET_VF_PERM) {
		pfvf->flags &= RVU_CLEAR_VF_PERM;
	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
		 (req->flags & VF_TRUSTED)) {
		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
		/* disable multicast and promisc entries */
		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
			if (blkaddr < 0)
				return 0;
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   target, 0);
			if (nixlf < 0)
				return 0;
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_ALLMULTI_ENTRY,
						     false);
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_PROMISC_ENTRY,
						     false);
		}
	}

	return 0;
}
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			mbox, devid,					\
			sizeof(struct _rsp_type));			\
		/* some handlers should complete even if reply */	\
		/* could not be allocated */				\
		if (!rsp &&						\
		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
		    _id != MBOX_MSG_VF_FLR)				\
			return -ENOMEM;					\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
						    (struct _req_type *)req, \
						    rsp);		\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		trace_otx2_msg_process(mbox->pdev, _id, err);		\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M

bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}
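
/* Illustration: M() above is expanded once per entry of the MBOX_MESSAGES
 * list in mbox.h, so every mailbox ID becomes a 'case' that allocates the
 * typed response, stamps its header and calls the matching handler. The
 * READY message, for instance, expands roughly to:
 *
 *	case MBOX_MSG_READY: {
 *		struct ready_msg_rsp *rsp;
 *		...
 *		err = rvu_mbox_handler_ready(rvu, (struct msg_req *)req, rsp);
 *		...
 *	}
 */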
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, devid);
	}
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}

static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFVF);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}

static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
				int num, int type, unsigned long *pf_bmap)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int region;
	u64 bar4;

	/* For cn10k platform VF mailbox regions of a PF follows after the
	 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
	 * RVU_PF_VF_BAR4_ADDR register.
	 */
	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (!test_bit(region, pf_bmap))
				continue;

			if (hw->cap.per_pf_mbox_regs) {
				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
						  RVU_AF_PFX_BAR4_ADDR(0)) +
				       MBOX_SIZE;
				bar4 += region * MBOX_SIZE;
			} else {
				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
				bar4 += region * MBOX_SIZE;
			}
			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
			if (!mbox_addr[region])
				goto error;
		}
		return 0;
	}

	/* For cn10k platform AF <-> PF mailbox region of a PF is read from per
	 * PF registers. Whereas for Octeontx2 it is read from
	 * RVU_AF_PF_BAR4_ADDR register.
	 */
	for (region = 0; region < num; region++) {
		if (!test_bit(region, pf_bmap))
			continue;

		if (hw->cap.per_pf_mbox_regs) {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PFX_BAR4_ADDR(region));
		} else {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PF_BAR4_ADDR);
			bar4 += region * MBOX_SIZE;
		}
		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
		if (!mbox_addr[region])
			goto error;
	}
	return 0;

error:
	while (region--)
		iounmap((void __iomem *)mbox_addr[region]);
	return -ENOMEM;
}
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *))
{
	int err = -EINVAL, i, dir, dir_up;
	void __iomem *reg_base;
	struct rvu_work *mwork;
	unsigned long *pf_bmap;
	void **mbox_regions;
	const char *name;
	u64 cfg;

	pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
	if (!pf_bmap)
		return -ENOMEM;

	/* RVU VFs */
	if (type == TYPE_AFVF)
		bitmap_set(pf_bmap, 0, num);

	if (type == TYPE_AFPF) {
		/* Mark enabled PFs in bitmap */
		for (i = 0; i < num; i++) {
			cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
			if (cfg & BIT_ULL(20))
				set_bit(i, pf_bmap);
		}
	}

	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
	if (!mbox_regions) {
		err = -ENOMEM;
		goto free_bitmap;
	}

	switch (type) {
	case TYPE_AFPF:
		name = "rvu_afpf_mailbox";
		dir = MBOX_DIR_AFPF;
		dir_up = MBOX_DIR_AFPF_UP;
		reg_base = rvu->afreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
		if (err)
			goto free_regions;
		break;
	case TYPE_AFVF:
		name = "rvu_afvf_mailbox";
		dir = MBOX_DIR_PFVF;
		dir_up = MBOX_DIR_PFVF_UP;
		reg_base = rvu->pfreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
		if (err)
			goto free_regions;
		break;
	default:
		goto free_regions;
	}

	mw->mbox_wq = alloc_workqueue(name,
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      num);
	if (!mw->mbox_wq) {
		err = -ENOMEM;
		goto unmap_regions;
	}

	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
				       sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk_up) {
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
				     reg_base, dir, num, pf_bmap);
	if (err)
		goto exit;

	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
				     reg_base, dir_up, num, pf_bmap);
	if (err)
		goto exit;

	for (i = 0; i < num; i++) {
		if (!test_bit(i, pf_bmap))
			continue;

		mwork = &mw->mbox_wrk[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_handler);

		mwork = &mw->mbox_wrk_up[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_up_handler);
	}
	goto free_regions;

exit:
	destroy_workqueue(mw->mbox_wq);
unmap_regions:
	while (num--)
		iounmap((void __iomem *)mbox_regions[num]);
free_regions:
	kfree(mbox_regions);
free_bitmap:
	bitmap_free(pf_bmap);
	return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
	struct otx2_mbox *mbox = &mw->mbox;
	struct otx2_mbox_dev *mdev;
	int devid;

	if (mw->mbox_wq) {
		destroy_workqueue(mw->mbox_wq);
		mw->mbox_wq = NULL;
	}

	for (devid = 0; devid < mbox->ndevs; devid++) {
		mdev = &mbox->dev[devid];
		if (mdev->hwbase)
			iounmap((void __iomem *)mdev->hwbase);
	}

	otx2_mbox_destroy(&mw->mbox);
	otx2_mbox_destroy(&mw->mbox_up);
}
static void rvu_queue_work(struct mbox_wq_info *mw, int first,
			   int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;

		/* The hdr->num_msgs is set to zero immediately in the
		 * interrupt handler to ensure that it holds a correct value
		 * next time when the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in
		 * pfaf_mbox_handler and pf->mbox.up_num_msgs holds the data
		 * for use in pfaf_mbox_up_handler.
		 */
		if (hdr->num_msgs) {
			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
		}
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
		}
	}
}

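/* Common mailbox interrupt handler: acknowledge the PF->AF interrupt and
 * queue work for every signalling PF, then do the same for AF's VFs, which
 * report through RVU_PF_VFPF_MBOX_INTX(0) and, beyond 64 VFs,
 * RVU_PF_VFPF_MBOX_INTX(1).
 */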
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs = 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}

static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0, i.e. AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}

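/* Tear down and reset every LF the given PF/VF owns in one RVU block.
 * Block specific cleanup (NIX/NPA/CPT) runs before the generic LF reset.
 */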
static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int err;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return;
	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (lf < 0)
			continue;

		/* Cleanup LF and reset it */
		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
		else if (block->addr == BLKADDR_NPA)
			rvu_npa_lf_teardown(rvu, pcifunc, lf);
		else if ((block->addr == BLKADDR_CPT0) ||
			 (block->addr == BLKADDR_CPT1))
			rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
					    slot);

		err = rvu_lf_reset(rvu, block, lf);
		if (err)
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, lf);
	}
}

static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	if (rvu_npc_exact_has_match_table(rvu))
		rvu_npc_exact_reset(rvu, pcifunc);

	mutex_lock(&rvu->flr_lock);
	/* Reset order should reflect inter-block dependencies:
	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
	 * 2. Flush and reset SSO/SSOW
	 * 3. Cleanup pools (NPA)
	 */
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	rvu_reset_lmt_map_tbl(rvu, pcifunc);
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	/* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
	 * entries, check and free the MCAM entries explicitly to avoid leak.
	 * Since the LF is already detached, pass -1 as the LF number.
	 */
	rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
	rvu_mac_reset(rvu, pcifunc);

	mutex_unlock(&rvu->flr_lock);
}

static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
	int reg = 0;

	/* pcifunc = 0(PF0) | (vf + 1) */
	__rvu_flr_handler(rvu, vf + 1);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}

	/* Signal FLR finish and enable IRQ */
	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

static void rvu_flr_handler(struct work_struct *work)
{
	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = flrwork->rvu;
	u16 pcifunc, numvfs, vf;
	u64 cfg;
	int pf;

	pf = flrwork - rvu->flr_wrk;
	if (pf >= rvu->hw->total_pfs) {
		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
		return;
	}

	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	numvfs = (cfg >> 12) & 0xFF;
	pcifunc = pf << RVU_PFVF_PF_SHIFT;

	for (vf = 0; vf < numvfs; vf++)
		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));

	__rvu_flr_handler(rvu, pcifunc);

	/* Signal FLR finish */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));

	/* Enable interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
}

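/* Queue FLR work for AF's VFs that signalled through VFFLR_INTX(reg). The
 * interrupt is cleared and masked here; rvu_afvf_flr_handler() re-enables
 * it once the FLR work has completed.
 */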
static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
{
	int dev, vf, reg = 0;
	u64 intr;

	if (start_vf >= 64)
		reg = 1;

	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
	if (!intr)
		return;

	for (vf = 0; vf < numvfs; vf++) {
		if (!(intr & BIT_ULL(vf)))
			continue;
		/* Clear and disable the interrupt */
		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
		/* Queue work */
		dev = vf + start_vf + rvu->hw->total_pfs;
		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
	}
}

static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
	if (!intr)
		goto afvf_flr;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
				    BIT_ULL(pf));
			/* Disable the interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
				    BIT_ULL(pf));
			/* PF is already dead, do only AF related operations */
			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
		}
	}

afvf_flr:
	rvu_afvf_queue_flr_work(rvu, 0, 64);
	if (rvu->vfs > 64)
		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

	return IRQ_HANDLED;
}

static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
{
	int vf;

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (vf = 0; vf < 64; vf++) {
		if (intr & (1ULL << vf)) {
			/* clear the trpend due to ME(master enable) */
			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
			/* clear interrupt */
			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
		}
	}
}

/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}

/* Handles ME interrupts from PFs */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear the trpend due to ME(master enable) */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
				    BIT_ULL(pf));
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
				    BIT_ULL(pf));
		}
	}

	return IRQ_HANDLED;
}

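/* Mask all AF level mbox/FLR/ME interrupts and free every IRQ that was
 * requested, including CPT's, before releasing the MSI-X vectors.
 */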
static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	rvu_cpt_unregister_interrupts(rvu);

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF FLR interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF ME interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq]) {
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
			rvu->irq_allocated[irq] = false;
		}
	}

	pci_free_irq_vectors(rvu->pdev);
}

static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. Offset equal to zero means
	 * that PF vectors are not configured and would overlap AF vectors.
	 */
	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
	       offset;
}

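/* Request MSI-X vectors and hook up AF's interrupt handlers: mailbox, FLR
 * and ME interrupts from PFs first, then the equivalent interrupts from
 * AF's own VFs when enough PF vectors are configured.
 */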
static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret, offset, pf_vec_start;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	/* Register FLR interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
		"RVUAF FLR");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for FLR\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

	/* Enable FLR interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Register ME interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
		"RVUAF ME");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
			  rvu_me_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for ME\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

	/* Clear TRPEND bit for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
	/* Enable ME interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	if (!rvu_afvf_msix_vectors_num_ok(rvu))
		return 0;

	/* Get PF MSIX vectors offset. */
	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Register MBOX0 interrupt. */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
	 * simply increment current offset by 1.
	 */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register FLR interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register ME interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	ret = rvu_cpt_register_interrupts(rvu);
	if (ret)
		goto fail;

	return 0;

fail:
	rvu_unregister_interrupts(rvu);
	return ret;
}

static void rvu_flr_wq_destroy(struct rvu *rvu)
{
	if (rvu->flr_wq) {
		destroy_workqueue(rvu->flr_wq);
		rvu->flr_wq = NULL;
	}
}

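/* Enable FLR reporting for all PFs and create the ordered workqueue plus
 * per-device work structs (PFs first, then AF's VFs) used to run FLR
 * handling out of interrupt context.
 */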
static int rvu_flr_init(struct rvu *rvu)
{
	int dev, num_devs;
	u64 cfg;
	int pf;

	/* Enable FLR for all PFs */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
			    cfg | BIT_ULL(22));
	}

	rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr",
					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!rvu->flr_wq)
		return -ENOMEM;

	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->flr_wrk) {
		destroy_workqueue(rvu->flr_wq);
		return -ENOMEM;
	}

	for (dev = 0; dev < num_devs; dev++) {
		rvu->flr_wrk[dev].rvu = rvu;
		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
	}

	mutex_init(&rvu->flr_lock);

	return 0;
}

static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

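/* Counterpart of rvu_disable_afvf_intr(): clear any stale state and enable
 * mbox, FLR and ME interrupts for AF's VFs, in two banks of up to 64 VFs
 * each.
 */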
static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

int rvu_get_num_lbk_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}

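/* Enable SRIOV on AF's PCI device. AF's VFs are backed by LBK channel
 * pairs, so the VF count is capped by the channel count from LBK(0)_CONST;
 * mailboxes and interrupts are brought up before VFs are created since VF
 * drivers may start communicating immediately.
 */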
static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = rvu_get_num_lbk_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* LBK channel number 63 is used for switching packets between
	 * CGX mapped VFs. Hence limit LBK pairs till 62 only.
	 */
	if (vfs > 62)
		vfs = 62;

	/* Save VFs number for reference in VF interrupts handlers.
	 * Since interrupts might start arriving during SRIOV enablement
	 * ordinary API cannot be used to get number of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}

static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}

static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
	strscpy(rvu->kpu_pfl_name,
		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}

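/* PCI probe: map AF/PF CSR BARs, discover and reset HW blocks, set up
 * resources, the AF<->PF mailbox, FLR handling, interrupts, devlink and
 * (optionally) AF's VFs, unwinding in reverse order on failure.
 */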
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_release_regions;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err) {
		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
		goto err_hwsetup;
	}

	err = rvu_flr_init(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
		goto err_mbox;
	}

	err = rvu_register_interrupts(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
		goto err_flr;
	}

	err = rvu_register_dl(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register devlink\n", __func__);
		goto err_irq;
	}

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
		goto err_dl;
	}

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	mutex_init(&rvu->rswitch.switch_lock);

	if (rvu->fwdata)
		ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
			  rvu->fwdata->ptp_ext_tstamp);

	return 0;
err_dl:
	rvu_unregister_dl(rvu);
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}

static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_unregister_dl(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
	.name     = DRV_NAME,
	.id_table = rvu_id_table,
	.probe    = rvu_probe,
	.remove   = rvu_remove,
};

static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&ptp_driver);
	if (err < 0)
		goto ptp_err;

	err = pci_register_driver(&mcs_driver);
	if (err < 0)
		goto mcs_err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		goto rvu_err;

	return 0;
rvu_err:
	pci_unregister_driver(&mcs_driver);
mcs_err:
	pci_unregister_driver(&ptp_driver);
ptp_err:
	pci_unregister_driver(&cgx_driver);

	return err;
}

static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&mcs_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);