// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Virtual Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>

#include "otx2_common.h"
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
#include "qos.h"
18 #define DRV_NAME "rvu_nicvf"
19 #define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
21 static const struct pci_device_id otx2_vf_id_table
[] = {
22 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM
, PCI_DEVID_OCTEONTX2_RVU_AFVF
) },
23 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM
, PCI_DEVID_OCTEONTX2_RVU_VF
) },
27 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
28 MODULE_DESCRIPTION(DRV_STRING
);
29 MODULE_LICENSE("GPL v2");
30 MODULE_DEVICE_TABLE(pci
, otx2_vf_id_table
);
/* RVU VF Interrupt Vector Enumeration */
enum {
	RVU_VF_INT_VEC_MBOX = 0x0,	/* VF <=> PF mailbox interrupt */
};
37 static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic
*vf
,
38 struct mbox_msghdr
*msg
)
40 if (msg
->id
>= MBOX_MSG_MAX
) {
42 "Mbox msg with unknown ID %d\n", msg
->id
);
46 if (msg
->sig
!= OTX2_MBOX_RSP_SIG
) {
48 "Mbox msg with wrong signature %x, ID %d\n",
53 if (msg
->rc
== MBOX_MSG_INVALID
) {
55 "PF/AF says the sent msg(s) %d were invalid\n",
62 vf
->pcifunc
= msg
->pcifunc
;
64 case MBOX_MSG_MSIX_OFFSET
:
65 mbox_handler_msix_offset(vf
, (struct msix_offset_rsp
*)msg
);
67 case MBOX_MSG_NPA_LF_ALLOC
:
68 mbox_handler_npa_lf_alloc(vf
, (struct npa_lf_alloc_rsp
*)msg
);
70 case MBOX_MSG_NIX_LF_ALLOC
:
71 mbox_handler_nix_lf_alloc(vf
, (struct nix_lf_alloc_rsp
*)msg
);
73 case MBOX_MSG_NIX_BP_ENABLE
:
74 mbox_handler_nix_bp_enable(vf
, (struct nix_bp_cfg_rsp
*)msg
);
79 "Mbox msg response has err %d, ID %d\n",
84 static void otx2vf_vfaf_mbox_handler(struct work_struct
*work
)
86 struct otx2_mbox_dev
*mdev
;
87 struct mbox_hdr
*rsp_hdr
;
88 struct mbox_msghdr
*msg
;
89 struct otx2_mbox
*mbox
;
94 af_mbox
= container_of(work
, struct mbox
, mbox_wrk
);
95 mbox
= &af_mbox
->mbox
;
97 rsp_hdr
= (struct mbox_hdr
*)(mdev
->mbase
+ mbox
->rx_start
);
98 num_msgs
= rsp_hdr
->num_msgs
;
103 offset
= mbox
->rx_start
+ ALIGN(sizeof(*rsp_hdr
), MBOX_MSG_ALIGN
);
105 for (id
= 0; id
< num_msgs
; id
++) {
106 msg
= (struct mbox_msghdr
*)(mdev
->mbase
+ offset
);
107 otx2vf_process_vfaf_mbox_msg(af_mbox
->pfvf
, msg
);
108 offset
= mbox
->rx_start
+ msg
->next_msgoff
;
109 if (mdev
->msgs_acked
== (af_mbox
->num_msgs
- 1))
110 __otx2_mbox_reset(mbox
, 0);
115 static int otx2vf_process_mbox_msg_up(struct otx2_nic
*vf
,
116 struct mbox_msghdr
*req
)
121 /* Check if valid, if not reply with a invalid msg */
122 if (req
->sig
!= OTX2_MBOX_REQ_SIG
) {
123 otx2_reply_invalid_msg(&vf
->mbox
.mbox_up
, 0, 0, req
->id
);
128 case MBOX_MSG_CGX_LINK_EVENT
:
129 rsp
= (struct msg_rsp
*)otx2_mbox_alloc_msg(
130 &vf
->mbox
.mbox_up
, 0,
131 sizeof(struct msg_rsp
));
135 rsp
->hdr
.id
= MBOX_MSG_CGX_LINK_EVENT
;
136 rsp
->hdr
.sig
= OTX2_MBOX_RSP_SIG
;
137 rsp
->hdr
.pcifunc
= 0;
139 err
= otx2_mbox_up_handler_cgx_link_event(
140 vf
, (struct cgx_link_info_msg
*)req
, rsp
);
143 otx2_reply_invalid_msg(&vf
->mbox
.mbox_up
, 0, 0, req
->id
);
149 static void otx2vf_vfaf_mbox_up_handler(struct work_struct
*work
)
151 struct otx2_mbox_dev
*mdev
;
152 struct mbox_hdr
*rsp_hdr
;
153 struct mbox_msghdr
*msg
;
154 struct otx2_mbox
*mbox
;
155 struct mbox
*vf_mbox
;
160 vf_mbox
= container_of(work
, struct mbox
, mbox_up_wrk
);
162 mbox
= &vf_mbox
->mbox_up
;
163 mdev
= &mbox
->dev
[0];
165 rsp_hdr
= (struct mbox_hdr
*)(mdev
->mbase
+ mbox
->rx_start
);
166 num_msgs
= rsp_hdr
->num_msgs
;
171 offset
= mbox
->rx_start
+ ALIGN(sizeof(*rsp_hdr
), MBOX_MSG_ALIGN
);
173 for (id
= 0; id
< num_msgs
; id
++) {
174 msg
= (struct mbox_msghdr
*)(mdev
->mbase
+ offset
);
175 otx2vf_process_mbox_msg_up(vf
, msg
);
176 offset
= mbox
->rx_start
+ msg
->next_msgoff
;
179 otx2_mbox_msg_send(mbox
, 0);
182 static irqreturn_t
otx2vf_vfaf_mbox_intr_handler(int irq
, void *vf_irq
)
184 struct otx2_nic
*vf
= (struct otx2_nic
*)vf_irq
;
185 struct otx2_mbox_dev
*mdev
;
186 struct otx2_mbox
*mbox
;
187 struct mbox_hdr
*hdr
;
191 otx2_write64(vf
, RVU_VF_INT
, BIT_ULL(0));
193 mbox_data
= otx2_read64(vf
, RVU_VF_VFPF_MBOX0
);
195 /* Read latest mbox data */
198 if (mbox_data
& MBOX_DOWN_MSG
) {
199 mbox_data
&= ~MBOX_DOWN_MSG
;
200 otx2_write64(vf
, RVU_VF_VFPF_MBOX0
, mbox_data
);
202 /* Check for PF => VF response messages */
203 mbox
= &vf
->mbox
.mbox
;
204 mdev
= &mbox
->dev
[0];
205 otx2_sync_mbox_bbuf(mbox
, 0);
207 hdr
= (struct mbox_hdr
*)(mdev
->mbase
+ mbox
->rx_start
);
209 queue_work(vf
->mbox_wq
, &vf
->mbox
.mbox_wrk
);
211 trace_otx2_msg_interrupt(mbox
->pdev
, "DOWN reply from PF to VF",
215 if (mbox_data
& MBOX_UP_MSG
) {
216 mbox_data
&= ~MBOX_UP_MSG
;
217 otx2_write64(vf
, RVU_VF_VFPF_MBOX0
, mbox_data
);
219 /* Check for PF => VF notification messages */
220 mbox
= &vf
->mbox
.mbox_up
;
221 mdev
= &mbox
->dev
[0];
222 otx2_sync_mbox_bbuf(mbox
, 0);
224 hdr
= (struct mbox_hdr
*)(mdev
->mbase
+ mbox
->rx_start
);
226 queue_work(vf
->mbox_wq
, &vf
->mbox
.mbox_up_wrk
);
228 trace_otx2_msg_interrupt(mbox
->pdev
, "UP message from PF to VF",
235 static void otx2vf_disable_mbox_intr(struct otx2_nic
*vf
)
237 int vector
= pci_irq_vector(vf
->pdev
, RVU_VF_INT_VEC_MBOX
);
239 /* Disable VF => PF mailbox IRQ */
240 otx2_write64(vf
, RVU_VF_INT_ENA_W1C
, BIT_ULL(0));
241 free_irq(vector
, vf
);
244 static int otx2vf_register_mbox_intr(struct otx2_nic
*vf
, bool probe_pf
)
246 struct otx2_hw
*hw
= &vf
->hw
;
251 /* Register mailbox interrupt handler */
252 irq_name
= &hw
->irq_name
[RVU_VF_INT_VEC_MBOX
* NAME_SIZE
];
253 snprintf(irq_name
, NAME_SIZE
, "RVUVFAF Mbox");
254 err
= request_irq(pci_irq_vector(vf
->pdev
, RVU_VF_INT_VEC_MBOX
),
255 otx2vf_vfaf_mbox_intr_handler
, 0, irq_name
, vf
);
258 "RVUPF: IRQ registration failed for VFAF mbox irq\n");
262 /* Enable mailbox interrupt for msgs coming from PF.
263 * First clear to avoid spurious interrupts, if any.
265 otx2_write64(vf
, RVU_VF_INT
, BIT_ULL(0));
266 otx2_write64(vf
, RVU_VF_INT_ENA_W1S
, BIT_ULL(0));
271 /* Check mailbox communication with PF */
272 req
= otx2_mbox_alloc_msg_ready(&vf
->mbox
);
274 otx2vf_disable_mbox_intr(vf
);
278 err
= otx2_sync_mbox_msg(&vf
->mbox
);
281 "AF not responding to mailbox, deferring probe\n");
282 otx2vf_disable_mbox_intr(vf
);
283 return -EPROBE_DEFER
;
288 static void otx2vf_vfaf_mbox_destroy(struct otx2_nic
*vf
)
290 struct mbox
*mbox
= &vf
->mbox
;
293 destroy_workqueue(vf
->mbox_wq
);
297 if (mbox
->mbox
.hwbase
&& !test_bit(CN10K_MBOX
, &vf
->hw
.cap_flag
))
298 iounmap((void __iomem
*)mbox
->mbox
.hwbase
);
300 otx2_mbox_destroy(&mbox
->mbox
);
301 otx2_mbox_destroy(&mbox
->mbox_up
);
304 static int otx2vf_vfaf_mbox_init(struct otx2_nic
*vf
)
306 struct mbox
*mbox
= &vf
->mbox
;
307 void __iomem
*hwbase
;
311 vf
->mbox_wq
= alloc_ordered_workqueue("otx2_vfaf_mailbox",
312 WQ_HIGHPRI
| WQ_MEM_RECLAIM
);
316 if (test_bit(CN10K_MBOX
, &vf
->hw
.cap_flag
)) {
317 /* For cn10k platform, VF mailbox region is in its BAR2
320 hwbase
= vf
->reg_base
+ RVU_VF_MBOX_REGION
;
322 /* Mailbox is a reserved memory (in RAM) region shared between
323 * admin function (i.e PF0) and this VF, shouldn't be mapped as
324 * device memory to allow unaligned accesses.
326 hwbase
= ioremap_wc(pci_resource_start(vf
->pdev
,
328 pci_resource_len(vf
->pdev
,
331 dev_err(vf
->dev
, "Unable to map VFAF mailbox region\n");
337 err
= otx2_mbox_init(&mbox
->mbox
, hwbase
, vf
->pdev
, vf
->reg_base
,
342 err
= otx2_mbox_init(&mbox
->mbox_up
, hwbase
, vf
->pdev
, vf
->reg_base
,
343 MBOX_DIR_VFPF_UP
, 1);
347 err
= otx2_mbox_bbuf_init(mbox
, vf
->pdev
);
351 INIT_WORK(&mbox
->mbox_wrk
, otx2vf_vfaf_mbox_handler
);
352 INIT_WORK(&mbox
->mbox_up_wrk
, otx2vf_vfaf_mbox_up_handler
);
353 mutex_init(&mbox
->lock
);
357 if (hwbase
&& !test_bit(CN10K_MBOX
, &vf
->hw
.cap_flag
))
359 destroy_workqueue(vf
->mbox_wq
);
363 static int otx2vf_open(struct net_device
*netdev
)
368 err
= otx2_open(netdev
);
372 /* LBKs do not receive link events so tell everyone we are up here */
373 vf
= netdev_priv(netdev
);
374 if (is_otx2_lbkvf(vf
->pdev
)) {
375 pr_info("%s NIC Link is UP\n", netdev
->name
);
376 netif_carrier_on(netdev
);
377 netif_tx_start_all_queues(netdev
);
/* ndo_stop: delegate to the common stop path. */
static int otx2vf_stop(struct net_device *netdev)
{
	return otx2_stop(netdev);
}
388 static netdev_tx_t
otx2vf_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
390 struct otx2_nic
*vf
= netdev_priv(netdev
);
391 int qidx
= skb_get_queue_mapping(skb
);
392 struct otx2_snd_queue
*sq
;
393 struct netdev_queue
*txq
;
395 sq
= &vf
->qset
.sq
[qidx
];
396 txq
= netdev_get_tx_queue(netdev
, qidx
);
398 if (!otx2_sq_append_skb(netdev
, sq
, skb
, qidx
)) {
399 netif_tx_stop_queue(txq
);
401 /* Check again, incase SQBs got freed up */
403 if (((sq
->num_sqbs
- *sq
->aura_fc_addr
) * sq
->sqe_per_sqb
)
405 netif_tx_wake_queue(txq
);
407 return NETDEV_TX_BUSY
;
413 static void otx2vf_set_rx_mode(struct net_device
*netdev
)
415 struct otx2_nic
*vf
= netdev_priv(netdev
);
417 queue_work(vf
->otx2_wq
, &vf
->rx_mode_work
);
420 static void otx2vf_do_set_rx_mode(struct work_struct
*work
)
422 struct otx2_nic
*vf
= container_of(work
, struct otx2_nic
, rx_mode_work
);
423 struct net_device
*netdev
= vf
->netdev
;
424 unsigned int flags
= netdev
->flags
;
425 struct nix_rx_mode
*req
;
427 mutex_lock(&vf
->mbox
.lock
);
429 req
= otx2_mbox_alloc_msg_nix_set_rx_mode(&vf
->mbox
);
431 mutex_unlock(&vf
->mbox
.lock
);
435 req
->mode
= NIX_RX_MODE_UCAST
;
437 if (flags
& IFF_PROMISC
)
438 req
->mode
|= NIX_RX_MODE_PROMISC
;
439 if (flags
& (IFF_ALLMULTI
| IFF_MULTICAST
))
440 req
->mode
|= NIX_RX_MODE_ALLMULTI
;
442 req
->mode
|= NIX_RX_MODE_USE_MCE
;
444 otx2_sync_mbox_msg(&vf
->mbox
);
446 mutex_unlock(&vf
->mbox
.lock
);
449 static int otx2vf_change_mtu(struct net_device
*netdev
, int new_mtu
)
451 bool if_up
= netif_running(netdev
);
457 netdev_info(netdev
, "Changing MTU from %d to %d\n",
458 netdev
->mtu
, new_mtu
);
459 netdev
->mtu
= new_mtu
;
462 err
= otx2vf_open(netdev
);
467 static void otx2vf_reset_task(struct work_struct
*work
)
469 struct otx2_nic
*vf
= container_of(work
, struct otx2_nic
, reset_task
);
473 if (netif_running(vf
->netdev
)) {
474 otx2vf_stop(vf
->netdev
);
476 otx2vf_open(vf
->netdev
);
482 static int otx2vf_set_features(struct net_device
*netdev
,
483 netdev_features_t features
)
485 return otx2_handle_ntuple_tc_features(netdev
, features
);
488 static const struct net_device_ops otx2vf_netdev_ops
= {
489 .ndo_open
= otx2vf_open
,
490 .ndo_stop
= otx2vf_stop
,
491 .ndo_start_xmit
= otx2vf_xmit
,
492 .ndo_select_queue
= otx2_select_queue
,
493 .ndo_set_rx_mode
= otx2vf_set_rx_mode
,
494 .ndo_set_mac_address
= otx2_set_mac_address
,
495 .ndo_change_mtu
= otx2vf_change_mtu
,
496 .ndo_set_features
= otx2vf_set_features
,
497 .ndo_get_stats64
= otx2_get_stats64
,
498 .ndo_tx_timeout
= otx2_tx_timeout
,
499 .ndo_eth_ioctl
= otx2_ioctl
,
500 .ndo_setup_tc
= otx2_setup_tc
,
503 static int otx2_wq_init(struct otx2_nic
*vf
)
505 vf
->otx2_wq
= create_singlethread_workqueue("otx2vf_wq");
509 INIT_WORK(&vf
->rx_mode_work
, otx2vf_do_set_rx_mode
);
510 INIT_WORK(&vf
->reset_task
, otx2vf_reset_task
);
514 static int otx2vf_realloc_msix_vectors(struct otx2_nic
*vf
)
516 struct otx2_hw
*hw
= &vf
->hw
;
519 num_vec
= hw
->nix_msixoff
;
520 num_vec
+= NIX_LF_CINT_VEC_START
+ hw
->max_queues
;
522 otx2vf_disable_mbox_intr(vf
);
523 pci_free_irq_vectors(hw
->pdev
);
524 err
= pci_alloc_irq_vectors(hw
->pdev
, num_vec
, num_vec
, PCI_IRQ_MSIX
);
526 dev_err(vf
->dev
, "%s: Failed to realloc %d IRQ vectors\n",
531 return otx2vf_register_mbox_intr(vf
, false);
534 static int otx2vf_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
536 int num_vec
= pci_msix_vec_count(pdev
);
537 struct device
*dev
= &pdev
->dev
;
538 int err
, qcount
, qos_txqs
;
539 struct net_device
*netdev
;
543 err
= pcim_enable_device(pdev
);
545 dev_err(dev
, "Failed to enable PCI device\n");
549 err
= pci_request_regions(pdev
, DRV_NAME
);
551 dev_err(dev
, "PCI request regions failed 0x%x\n", err
);
555 err
= dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(48));
557 dev_err(dev
, "DMA mask config failed, abort\n");
558 goto err_release_regions
;
561 pci_set_master(pdev
);
563 qcount
= num_online_cpus();
564 qos_txqs
= min_t(int, qcount
, OTX2_QOS_MAX_LEAF_NODES
);
565 netdev
= alloc_etherdev_mqs(sizeof(*vf
), qcount
+ qos_txqs
, qcount
);
568 goto err_release_regions
;
571 pci_set_drvdata(pdev
, netdev
);
572 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
573 vf
= netdev_priv(netdev
);
577 vf
->iommu_domain
= iommu_get_domain_for_dev(dev
);
579 vf
->flags
|= OTX2_FLAG_INTF_DOWN
;
582 hw
->rx_queues
= qcount
;
583 hw
->tx_queues
= qcount
;
584 hw
->max_queues
= qcount
;
585 hw
->non_qos_queues
= qcount
;
586 hw
->rbuf_len
= OTX2_DEFAULT_RBUF_LEN
;
587 /* Use CQE of 128 byte descriptor size by default */
590 hw
->irq_name
= devm_kmalloc_array(&hw
->pdev
->dev
, num_vec
, NAME_SIZE
,
594 goto err_free_netdev
;
597 hw
->affinity_mask
= devm_kcalloc(&hw
->pdev
->dev
, num_vec
,
598 sizeof(cpumask_var_t
), GFP_KERNEL
);
599 if (!hw
->affinity_mask
) {
601 goto err_free_netdev
;
604 err
= pci_alloc_irq_vectors(hw
->pdev
, num_vec
, num_vec
, PCI_IRQ_MSIX
);
606 dev_err(dev
, "%s: Failed to alloc %d IRQ vectors\n",
608 goto err_free_netdev
;
611 vf
->reg_base
= pcim_iomap(pdev
, PCI_CFG_REG_BAR_NUM
, 0);
613 dev_err(dev
, "Unable to map physical function CSRs, aborting\n");
615 goto err_free_irq_vectors
;
618 otx2_setup_dev_hw_settings(vf
);
619 /* Init VF <=> PF mailbox stuff */
620 err
= otx2vf_vfaf_mbox_init(vf
);
622 goto err_free_irq_vectors
;
624 /* Register mailbox interrupt */
625 err
= otx2vf_register_mbox_intr(vf
, true);
627 goto err_mbox_destroy
;
629 /* Request AF to attach NPA and LIX LFs to this AF */
630 err
= otx2_attach_npa_nix(vf
);
632 goto err_disable_mbox_intr
;
634 err
= otx2vf_realloc_msix_vectors(vf
);
636 goto err_detach_rsrc
;
638 err
= otx2_set_real_num_queues(netdev
, qcount
, qcount
);
640 goto err_detach_rsrc
;
642 err
= cn10k_lmtst_init(vf
);
644 goto err_detach_rsrc
;
646 /* Don't check for error. Proceed without ptp */
649 /* Assign default mac address */
650 otx2_get_mac_from_af(netdev
);
652 netdev
->hw_features
= NETIF_F_RXCSUM
| NETIF_F_IP_CSUM
|
653 NETIF_F_IPV6_CSUM
| NETIF_F_RXHASH
|
654 NETIF_F_SG
| NETIF_F_TSO
| NETIF_F_TSO6
|
656 netdev
->features
= netdev
->hw_features
;
657 /* Support TSO on tag interface */
658 netdev
->vlan_features
|= netdev
->features
;
659 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_TX
|
660 NETIF_F_HW_VLAN_STAG_TX
;
661 netdev
->features
|= netdev
->hw_features
;
663 netdev
->hw_features
|= NETIF_F_NTUPLE
;
664 netdev
->hw_features
|= NETIF_F_RXALL
;
665 netdev
->hw_features
|= NETIF_F_HW_TC
;
667 netif_set_tso_max_segs(netdev
, OTX2_MAX_GSO_SEGS
);
668 netdev
->watchdog_timeo
= OTX2_TX_TIMEOUT
;
670 netdev
->netdev_ops
= &otx2vf_netdev_ops
;
672 netdev
->min_mtu
= OTX2_MIN_MTU
;
673 netdev
->max_mtu
= otx2_get_max_mtu(vf
);
675 /* To distinguish, for LBK VFs set netdev name explicitly */
676 if (is_otx2_lbkvf(vf
->pdev
)) {
679 n
= (vf
->pcifunc
>> RVU_PFVF_FUNC_SHIFT
) & RVU_PFVF_FUNC_MASK
;
680 /* Need to subtract 1 to get proper VF number */
682 snprintf(netdev
->name
, sizeof(netdev
->name
), "lbk%d", n
);
685 err
= register_netdev(netdev
);
687 dev_err(dev
, "Failed to register netdevice\n");
688 goto err_ptp_destroy
;
691 err
= otx2_wq_init(vf
);
693 goto err_unreg_netdev
;
695 otx2vf_set_ethtool_ops(netdev
);
697 err
= otx2vf_mcam_flow_init(vf
);
699 goto err_unreg_netdev
;
701 err
= otx2_init_tc(vf
);
703 goto err_unreg_netdev
;
705 err
= otx2_register_dl(vf
);
707 goto err_shutdown_tc
;
710 err
= otx2_dcbnl_set_ops(netdev
);
712 goto err_shutdown_tc
;
714 otx2_qos_init(vf
, qos_txqs
);
719 otx2_shutdown_tc(vf
);
721 unregister_netdev(netdev
);
723 otx2_ptp_destroy(vf
);
725 free_percpu(vf
->hw
.lmt_info
);
726 if (test_bit(CN10K_LMTST
, &vf
->hw
.cap_flag
))
727 qmem_free(vf
->dev
, vf
->dync_lmt
);
728 otx2_detach_resources(&vf
->mbox
);
729 err_disable_mbox_intr
:
730 otx2vf_disable_mbox_intr(vf
);
732 otx2vf_vfaf_mbox_destroy(vf
);
733 err_free_irq_vectors
:
734 pci_free_irq_vectors(hw
->pdev
);
736 pci_set_drvdata(pdev
, NULL
);
739 pci_release_regions(pdev
);
743 static void otx2vf_remove(struct pci_dev
*pdev
)
745 struct net_device
*netdev
= pci_get_drvdata(pdev
);
751 vf
= netdev_priv(netdev
);
753 /* Disable 802.3x pause frames */
754 if (vf
->flags
& OTX2_FLAG_RX_PAUSE_ENABLED
||
755 (vf
->flags
& OTX2_FLAG_TX_PAUSE_ENABLED
)) {
756 vf
->flags
&= ~OTX2_FLAG_RX_PAUSE_ENABLED
;
757 vf
->flags
&= ~OTX2_FLAG_TX_PAUSE_ENABLED
;
758 otx2_config_pause_frm(vf
);
762 /* Disable PFC config */
765 otx2_config_priority_flow_ctrl(vf
);
769 cancel_work_sync(&vf
->reset_task
);
770 otx2_unregister_dl(vf
);
771 unregister_netdev(netdev
);
773 destroy_workqueue(vf
->otx2_wq
);
774 otx2_ptp_destroy(vf
);
775 otx2_mcam_flow_del(vf
);
776 otx2_shutdown_tc(vf
);
777 otx2_shutdown_qos(vf
);
778 otx2vf_disable_mbox_intr(vf
);
779 otx2_detach_resources(&vf
->mbox
);
780 free_percpu(vf
->hw
.lmt_info
);
781 if (test_bit(CN10K_LMTST
, &vf
->hw
.cap_flag
))
782 qmem_free(vf
->dev
, vf
->dync_lmt
);
783 otx2vf_vfaf_mbox_destroy(vf
);
784 pci_free_irq_vectors(vf
->pdev
);
785 pci_set_drvdata(pdev
, NULL
);
788 pci_release_regions(pdev
);
791 static struct pci_driver otx2vf_driver
= {
793 .id_table
= otx2_vf_id_table
,
794 .probe
= otx2vf_probe
,
795 .remove
= otx2vf_remove
,
796 .shutdown
= otx2vf_remove
,
799 static int __init
otx2vf_init_module(void)
801 pr_info("%s: %s\n", DRV_NAME
, DRV_STRING
);
803 return pci_register_driver(&otx2vf_driver
);
806 static void __exit
otx2vf_cleanup_module(void)
808 pci_unregister_driver(&otx2vf_driver
);
811 module_init(otx2vf_init_module
);
812 module_exit(otx2vf_cleanup_module
);