// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "otx2_common.h"
#include "otx2_reg.h"

#define DRV_NAME	"octeontx2-nicvf"
#define DRV_STRING	"Marvell OcteonTX2 NIC Virtual Function Driver"
static const struct pci_device_id otx2_vf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
	{ }
};
MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);
/* RVU VF Interrupt Vector Enumeration */
enum {
	RVU_VF_INT_VEC_MBOX = 0x0,
};
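
/* Handle a single response message from the PF/AF: validate its ID and
 * signature, then dispatch to the handler matching the message type.
 */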
static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
					 struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(vf->dev,
			"Mbox msg with unknown ID %d\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(vf->dev,
			"Mbox msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}

	if (msg->rc == MBOX_MSG_INVALID) {
		dev_err(vf->dev,
			"PF/AF says the sent msg(s) %d were invalid\n",
			msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		vf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(vf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(vf->dev,
				"Mbox msg response has err %d, ID %d\n",
				msg->rc, msg->id);
	}
}
static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (af_mbox->num_msgs == 0)
		return;
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}

	otx2_mbox_reset(mbox, 0);
}
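
/* Handle one upstream (PF => VF) notification: every up-message must be
 * answered, so either allocate and fill a response or reply that the
 * request was invalid.
 */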
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
				      struct mbox_msghdr *req)
{
	struct msg_rsp *rsp;
	int err;

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
	case MBOX_MSG_CGX_LINK_EVENT:
		rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
						&vf->mbox.mbox_up, 0,
						sizeof(struct msg_rsp));
		if (!rsp)
			return -ENOMEM;

		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
		rsp->hdr.pcifunc = 0;
		rsp->hdr.rc = 0;
		err = otx2_mbox_up_handler_cgx_link_event(
				vf, (struct cgx_link_info_msg *)req, rsp);
		return err;
	default:
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}
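
/* Bottom half for upstream (PF => VF notification) mailbox traffic: runs
 * from the mailbox workqueue, processes each notification and sends the
 * batched responses back to the PF.
 */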
static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *vf;
	int offset, id;

	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	vf = vf_mbox->pfvf;
	mbox = &vf_mbox->mbox_up;
	mdev = &mbox->dev[0];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (vf_mbox->up_num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_mbox_msg_up(vf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_msg_send(mbox, 0);
}
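
/* Hard-IRQ handler for the AF/PF mailbox vector: acks the interrupt,
 * snapshots the message counts from the shared headers and defers the
 * actual processing to the mailbox workqueue.
 */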
static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;

	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));

	/* Read latest mbox data */
	smp_rmb();

	/* Check for PF => VF response messages */
	mbox = &vf->mbox.mbox;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
	}
	/* Check for PF => VF notification messages */
	mbox = &vf->mbox.mbox_up;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.up_num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
	}

	return IRQ_HANDLED;
}
static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
{
	int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);

	/* Disable VF => PF mailbox IRQ */
	otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, vf);
}
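
/* Request the mailbox IRQ and enable it. When called from probe
 * (probe_pf == true) also send a READY message and wait for the AF's
 * answer, so probing is deferred if the AF is not up yet.
 */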
static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
	struct otx2_hw *hw = &vf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
	err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
			  otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
	if (err) {
		dev_err(vf->dev,
			"RVUPF: IRQ registration failed for VFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from PF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_pf)
		return 0;

	/* Check mailbox communication with PF */
	req = otx2_mbox_alloc_msg_ready(&vf->mbox);
	if (!req) {
		otx2vf_disable_mbox_intr(vf);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&vf->mbox);
	if (err) {
		dev_warn(vf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2vf_disable_mbox_intr(vf);
		return -EPROBE_DEFER;
	}

	return 0;
}
static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;

	if (vf->mbox_wq) {
		flush_workqueue(vf->mbox_wq);
		destroy_workqueue(vf->mbox_wq);
		vf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}
static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = vf;
	vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
				      WQ_UNBOUND | WQ_HIGHPRI |
				      WQ_MEM_RECLAIM, 1);
	if (!vf->mbox_wq)
		return -ENOMEM;

	/* Mailbox is a reserved memory (in RAM) region shared between
	 * admin function (i.e PF0) and this VF, shouldn't be mapped as
	 * device memory to allow unaligned accesses.
	 */
	hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
			    pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
	if (!hwbase) {
		dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, vf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;
exit:
	destroy_workqueue(vf->mbox_wq);
	return err;
}
static int otx2vf_open(struct net_device *netdev)
{
	struct otx2_nic *vf;
	int err;

	err = otx2_open(netdev);
	if (err)
		return err;

	/* LBKs do not receive link events so tell everyone we are up here */
	vf = netdev_priv(netdev);
	if (is_otx2_lbkvf(vf->pdev)) {
		pr_info("%s NIC Link is UP\n", netdev->name);
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	}

	return 0;
}
static int otx2vf_stop(struct net_device *netdev)
{
	return otx2_stop(netdev);
}
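
/* Transmit path: when an skb cannot be appended the queue is stopped, and
 * the free SQE capacity is re-derived from the aura flow-control count as
 * (num_sqbs - *aura_fc_addr) * sqe_per_sqb before deciding to re-wake it.
 */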
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &vf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool if_up = netif_running(netdev);
	int err = 0;

	if (if_up)
		otx2vf_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2vf_open(netdev);

	return err;
}
static void otx2vf_reset_task(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);

	rtnl_lock();

	if (netif_running(vf->netdev)) {
		otx2vf_stop(vf->netdev);
		vf->reset_count++;
		otx2vf_open(vf->netdev);
	}

	rtnl_unlock();
}
static const struct net_device_ops otx2vf_netdev_ops = {
	.ndo_open = otx2vf_open,
	.ndo_stop = otx2vf_stop,
	.ndo_start_xmit = otx2vf_xmit,
	.ndo_set_mac_address = otx2_set_mac_address,
	.ndo_change_mtu = otx2vf_change_mtu,
	.ndo_get_stats64 = otx2_get_stats64,
	.ndo_tx_timeout = otx2_tx_timeout,
};
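
/* Only the mailbox MSI-X vector is usable until the AF reports the NIX
 * MSI-X offset, so after NPA/NIX LFs are attached the vectors are
 * reallocated to also cover the completion and queue interrupts.
 */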
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
	struct otx2_hw *hw = &vf->hw;
	int num_vec, err;

	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2vf_disable_mbox_intr(vf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2vf_register_mbox_intr(vf, false);
}
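
/* Device probe: map CSRs, bring up the VF <=> PF mailbox, ask the AF to
 * attach NPA/NIX LFs, then configure and register the netdev.
 */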
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int num_vec = pci_msix_vec_count(pdev);
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct otx2_nic *vf;
	struct otx2_hw *hw;
	int err, qcount;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	qcount = num_online_cpus();
	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}
	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	vf = netdev_priv(netdev);
	vf->netdev = netdev;
	vf->pdev = pdev;
	vf->dev = dev;
	vf->iommu_domain = iommu_get_domain_for_dev(dev);

	vf->flags |= OTX2_FLAG_INTF_DOWN;
	hw = &vf->hw;
	hw->pdev = vf->pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;

	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!vf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}
	/* Init VF <=> PF mailbox stuff */
	err = otx2vf_vfaf_mbox_init(vf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2vf_register_mbox_intr(vf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this VF */
	err = otx2_attach_npa_nix(vf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2vf_realloc_msix_vectors(vf);
	if (err)
		goto err_mbox_destroy;

	err = otx2_set_real_num_queues(netdev, qcount, qcount);
	if (err)
		goto err_detach_rsrc;
	otx2_setup_dev_hw_settings(vf);

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
	netdev->features = netdev->hw_features;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2vf_netdev_ops;

	/* MTU range: 68 - 9190 */
	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = OTX2_MAX_MTU;

	INIT_WORK(&vf->reset_task, otx2vf_reset_task);

	/* To distinguish, for LBK VFs set netdev name explicitly */
	if (is_otx2_lbkvf(vf->pdev)) {
		int n;

		n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
		/* Need to subtract 1 to get proper VF number */
		n -= 1;
		snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
	}
	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_detach_rsrc;
	}

	otx2vf_set_ethtool_ops(netdev);

	/* Enable pause frames by default */
	vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

	return 0;
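
/* Error unwind: release resources in reverse order of acquisition */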
err_detach_rsrc:
	otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
	otx2vf_disable_mbox_intr(vf);
err_mbox_destroy:
	otx2vf_vfaf_mbox_destroy(vf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}
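
/* Device teardown: mirror of probe, also used as the shutdown handler */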
static void otx2vf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *vf;

	if (!netdev)
		return;

	vf = netdev_priv(netdev);

	otx2vf_disable_mbox_intr(vf);
	unregister_netdev(netdev);
	otx2_detach_resources(&vf->mbox);
	otx2vf_vfaf_mbox_destroy(vf);
	pci_free_irq_vectors(vf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}
static struct pci_driver otx2vf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_vf_id_table,
	.probe = otx2vf_probe,
	.remove = otx2vf_remove,
	.shutdown = otx2vf_remove,
};
static int __init otx2vf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2vf_driver);
}
static void __exit otx2vf_cleanup_module(void)
{
	pci_unregister_driver(&otx2vf_driver);
}
module_init(otx2vf_init_module);
module_exit(otx2vf_cleanup_module);