// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bitfield.h>
#include <net/page_pool/types.h>

#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include <rvu_trace.h>

#define DRV_NAME	"rvu_nicpf"
#define DRV_STRING	"Marvell RVU NIC Physical Function Driver"

/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
	{ 0, } /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);

static void otx2_vf_link_event_task(struct work_struct *work);

static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);

static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	bool if_up = netif_running(netdev);
	int err = 0;

	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    netdev->mtu);
		return -EINVAL;
	}

	if (if_up)
		otx2_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2_open(netdev);

	return err;
}

static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
{
	int irq, vfs = pf->total_vfs;

	/* Disable VFs ME interrupts */
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(irq, pf);

	/* Disable VFs FLR interrupts */
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(irq, pf);

	if (vfs <= 64)
		return;

	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(irq, pf);

	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(irq, pf);
}

static void otx2_flr_wq_destroy(struct otx2_nic *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	devm_kfree(pf->dev, pf->flr_wrk);
	pf->flr_wrk = NULL;
}

static void otx2_flr_handler(struct work_struct *work)
{
	struct flr_work *flrwork = container_of(work, struct flr_work, work);
	struct otx2_nic *pf = flrwork->pf;
	struct mbox *mbox = &pf->mbox;
	struct msg_req *req;
	int vf, reg = 0;

	vf = flrwork - pf->flr_wrk;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_vf_flr(mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return;
	}
	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	if (!otx2_sync_mbox_msg(&pf->mbox)) {
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* clear transaction pending bit */
		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}

	mutex_unlock(&mbox->lock);
}

static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int reg, dev, vf, start_vf, num_reg = 1;
	u64 intr;

	if (pf->total_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
			/* Clear interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
				     BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int vf, reg, num_reg = 1;
	u64 intr;

	if (pf->total_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			/* clear trpend bit */
			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* clear interrupt */
			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int ret;

	/* Register ME interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
			  otx2_pf_me_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for ME0\n");
	}

	/* Register FLR interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for FLR0\n");
		return ret;
	}

	if (numvfs > 64) {
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
				  otx2_pf_me_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for ME1\n");
		}
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for FLR1\n");
			return ret;
		}
	}

	/* Enable ME interrupt for all VFs */
	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	/* Enable FLR interrupt for all VFs */
	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	if (numvfs > 64) {
		numvfs -= 64;

		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));

		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
	}
	return 0;
}

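/* Note on the bank layout used above: each RVU_PF_VF*_INT* register covers
 * 64 VFs, so VFs 0-63 live in bank 0 and VFs 64-127 in bank 1. INTR_MASK(n)
 * is expected to expand to a mask with the low n bits set (all ones once n
 * reaches 64); e.g. with numvfs = 96, bank 0 gets a full 64-bit mask and
 * bank 1 gets INTR_MASK(96 - 64), i.e. the low 32 bits.
 */
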
static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
{
	int vf;

	pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
	if (!pf->flr_wq)
		return -ENOMEM;

	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
				   sizeof(struct flr_work), GFP_KERNEL);
	if (!pf->flr_wrk) {
		destroy_workqueue(pf->flr_wq);
		return -ENOMEM;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		pf->flr_wrk[vf].pf = pf;
		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
	}

	return 0;
}

static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
			       int first, int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value next time
		 * when the interrupt handler is called. pf->mw[i].num_msgs
		 * holds the data for use in otx2_pfvf_mbox_handler and
		 * pf->mw[i].up_num_msgs holds the data for use in
		 * otx2_pfvf_mbox_up_handler.
		 */
		if (hdr->num_msgs) {
			mw[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mbox_wq, &mw[i].mbox_wrk);
		}

		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
		}
	}
}

static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
				  int devid)
{
	struct otx2_mbox_dev *src_mdev = mdev;
	int offset;

	/* Msgs are already copied, trigger VF's mbox irq */
	smp_wmb();

	otx2_mbox_wait_for_zero(pfvf_mbox, devid);

	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
	writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);

	/* Restore VF's mbox bounce buffer region address */
	src_mdev->mbase = bbuf_base;
}

static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
				     struct otx2_mbox *src_mbox,
				     int dir, int vf, int num_msgs)
{
	struct otx2_mbox_dev *src_mdev, *dst_mdev;
	struct mbox_hdr *mbox_hdr;
	struct mbox_hdr *req_hdr;
	struct mbox *dst_mbox;
	int dst_size, err;

	if (dir == MBOX_DIR_PFAF) {
		/* Set VF's mailbox memory as PF's bounce buffer memory, so
		 * that explicit copying of VF's msgs to PF=>AF mbox region
		 * and AF=>PF responses to VF's mbox region can be avoided.
		 */
		src_mdev = &src_mbox->dev[vf];
		mbox_hdr = src_mbox->hwbase +
				src_mbox->rx_start + (vf * MBOX_SIZE);

		dst_mbox = &pf->mbox;
		dst_size = dst_mbox->mbox.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area and have a valid size */
		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox.dev[0];

		mutex_lock(&pf->mbox.lock);
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = num_msgs;
		err = otx2_sync_mbox_msg(dst_mbox);
		/* Error code -EIO indicates there is a communication failure
		 * to the AF. Rest of the error codes indicate that AF processed
		 * VF messages and set the error codes in response messages
		 * (if any) so simply forward responses to VF.
		 */
		if (err == -EIO) {
			dev_warn(pf->dev,
				 "AF not responding to VF%d messages\n", vf);
			/* restore PF mbase and exit */
			dst_mdev->mbase = pf->mbox.bbuf_base;
			mutex_unlock(&pf->mbox.lock);
			return err;
		}
		/* At this point, all the VF messages sent to AF are acked
		 * with proper responses and responses are copied to VF
		 * mailbox hence raise interrupt to VF.
		 */
		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
					      dst_mbox->mbox.rx_start);
		req_hdr->num_msgs = num_msgs;

		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
				      pf->mbox.bbuf_base, vf);
		mutex_unlock(&pf->mbox.lock);
	} else if (dir == MBOX_DIR_PFVF_UP) {
		src_mdev = &src_mbox->dev[0];
		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;

		dst_mbox = &pf->mbox_pfvf[0];
		dst_size = dst_mbox->mbox_up.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area */
		if (mbox_hdr->msg_size > dst_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox_up.dev[vf];
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = mbox_hdr->num_msgs;
		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
		if (err) {
			dev_warn(pf->dev,
				 "VF%d is not responding to mailbox\n", vf);
			return err;
		}
	} else if (dir == MBOX_DIR_VFPF_UP) {
		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;
		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
				      &pf->mbox.mbox_up,
				      pf->mbox_pfvf[vf].bbuf_base,
				      0);
	}

	return 0;
}

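/* To summarize the three forwarding directions handled above:
 * MBOX_DIR_PFAF pushes a VF's requests down to the AF by aliasing the PF's
 * bounce buffer onto the VF's mailbox memory (avoiding a memcpy),
 * MBOX_DIR_PFVF_UP relays AF notifications up to a VF, and MBOX_DIR_VFPF_UP
 * relays VF-originated up-messages toward the AF; in each case only the
 * headers are rewritten while message payloads stay in place.
 */
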
static void otx2_pfvf_mbox_handler(struct work_struct *work)
{
	struct mbox_msghdr *msg = NULL;
	int offset, vf_idx, id, err;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *pf;

	vf_mbox = container_of(work, struct mbox, mbox_wrk);
	pf = vf_mbox->pfvf;
	vf_idx = vf_mbox - pf->mbox_pfvf;

	mbox = &pf->mbox_pfvf[0].mbox;
	mdev = &mbox->dev[vf_idx];
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
					     offset);

		if (msg->sig != OTX2_MBOX_REQ_SIG)
			goto inval_msg;

		/* Set VF's number in each of the msg */
		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
		offset = msg->next_msgoff;
	}
	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
					vf_mbox->num_msgs);
	if (err)
		goto inval_msg;
	return;

inval_msg:
	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
	otx2_mbox_msg_send(mbox, vf_idx);
}

static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_nic *pf = vf_mbox->pfvf;
	struct otx2_mbox_dev *mdev;
	int offset, id, vf_idx = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;

	vf_idx = vf_mbox - pf->mbox_pfvf;
	mbox = &pf->mbox_pfvf[0].mbox_up;
	mdev = &mbox->dev[vf_idx];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(pf->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(pf->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(pf->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}

end:
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
			__otx2_mbox_reset(mbox, vf_idx);
		mdev->msgs_acked++;
	}
}

static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
	int vfs = pf->total_vfs;
	struct mbox *mbox;
	u64 intr;

	mbox = pf->mbox_pfvf;
	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
		otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
		if (intr)
			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF",
						 intr);
		vfs = 64;
	}

	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);

	otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);

	if (intr)
		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);

	return IRQ_HANDLED;
}

static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
{
	void __iomem *hwbase;
	struct mbox *mbox;
	int err, vf;
	u64 base;

	if (!numvfs)
		return -EINVAL;

	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
				     sizeof(struct mbox), GFP_KERNEL);
	if (!pf->mbox_pfvf)
		return -ENOMEM;

	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
					   WQ_UNBOUND | WQ_HIGHPRI |
					   WQ_MEM_RECLAIM, 0);
	if (!pf->mbox_pfvf_wq)
		return -ENOMEM;

	/* On CN10K platform, PF <-> VF mailbox region follows after
	 * PF <-> AF mailbox region.
	 */
	if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
		base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
		       MBOX_SIZE;
	else
		base = readq((void __iomem *)((u64)pf->reg_base +
					      RVU_PF_VF_BAR4_ADDR));

	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
	if (!hwbase) {
		err = -ENOMEM;
		goto free_wq;
	}

	mbox = &pf->mbox_pfvf[0];
	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF, numvfs);
	if (err)
		goto free_iomem;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF_UP, numvfs);
	if (err)
		goto free_iomem;

	for (vf = 0; vf < numvfs; vf++) {
		mbox->pfvf = pf;
		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
		mbox++;
	}

	return 0;

free_iomem:
	if (hwbase)
		iounmap(hwbase);
free_wq:
	destroy_workqueue(pf->mbox_pfvf_wq);
	return err;
}

static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox_pfvf[0];

	if (!mbox)
		return;

	if (pf->mbox_pfvf_wq) {
		destroy_workqueue(pf->mbox_pfvf_wq);
		pf->mbox_pfvf_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap(mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
}

static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	/* Clear PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);

	/* Enable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
	if (numvfs > 64) {
		numvfs -= 64;
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
	}
}

static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	int vector;

	/* Disable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);

	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, pf);

	if (numvfs > 64) {
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, pf);
	}
}

static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int err;

	/* Register MBOX0 interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
	if (pf->pcifunc)
		snprintf(irq_name, NAME_SIZE,
			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
	else
		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
		return err;
	}

	if (numvfs > 64) {
		/* Register MBOX1 interrupt handler */
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
		if (pf->pcifunc)
			snprintf(irq_name, NAME_SIZE,
				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
		else
			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
		err = request_irq(pci_irq_vector(pf->pdev,
						 RVU_PF_INT_VEC_VFPF_MBOX1),
				  otx2_pfvf_mbox_intr_handler,
				  0, irq_name, pf);
		if (err) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
			return err;
		}
	}

	otx2_enable_pfvf_mbox_intr(pf, numvfs);

	return 0;
}

static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
				       struct mbox_msghdr *msg)
{
	int devid;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(pf->dev,
			"Mbox msg with unknown ID 0x%x\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(pf->dev,
			"Mbox msg with wrong signature %x, ID 0x%x\n",
			msg->sig, msg->id);
		return;
	}

	/* message response heading VF */
	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
	if (devid) {
		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
		struct delayed_work *dwork;

		switch (msg->id) {
		case MBOX_MSG_NIX_LF_START_RX:
			config->intf_down = false;
			dwork = &config->link_event_work;
			schedule_delayed_work(dwork, msecs_to_jiffies(100));
			break;
		case MBOX_MSG_NIX_LF_STOP_RX:
			config->intf_down = true;
			break;
		}

		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		pf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
		break;
	case MBOX_MSG_CGX_STATS:
		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
		break;
	case MBOX_MSG_CGX_FEC_STATS:
		mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(pf->dev,
				"Mbox msg response has err %d, ID 0x%x\n",
				msg->rc, msg->id);
		break;
	}
}

static void otx2_pfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	struct otx2_nic *pf;
	int offset, id;
	u16 num_msgs;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	num_msgs = rsp_hdr->num_msgs;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
	pf = af_mbox->pfvf;

	for (id = 0; id < num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2_process_pfaf_mbox_msg(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}

static void otx2_handle_link_event(struct otx2_nic *pf)
{
	struct cgx_link_user_info *linfo = &pf->linfo;
	struct net_device *netdev = pf->netdev;

	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
		linfo->link_up ? "UP" : "DOWN", linfo->speed,
		linfo->full_duplex ? "Full" : "Half");
	if (linfo->link_up) {
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	} else {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
}

int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
					 struct mcs_intr_info *event,
					 struct msg_rsp *rsp)
{
	cn10k_handle_mcs_event(pf, event);

	return 0;
}

int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
					struct cgx_link_info_msg *msg,
					struct msg_rsp *rsp)
{
	int i;

	/* Copy the link info sent by AF */
	pf->linfo = msg->link_info;

	/* notify VFs about link event */
	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
		struct otx2_vf_config *config = &pf->vf_configs[i];
		struct delayed_work *dwork = &config->link_event_work;

		if (config->intf_down)
			continue;

		schedule_delayed_work(dwork, msecs_to_jiffies(100));
	}

	/* interface has not been fully configured yet */
	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return 0;

	otx2_handle_link_event(pf);
	return 0;
}

static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
				    struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			&pf->mbox.mbox_up, 0,				\
			sizeof(struct _rsp_type));			\
		if (!rsp)						\
			return -ENOMEM;					\
									\
		rsp->hdr.id = _id;					\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
		rsp->hdr.pcifunc = 0;					\
		rsp->hdr.rc = 0;					\
									\
		err = otx2_mbox_up_handler_ ## _fn_name(		\
			pf, (struct _req_type *)req, rsp);		\
		if (err)						\
			rsp->hdr.rc = err;				\
		return err;						\
	}
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
		break;
	default:
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}

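/* The M() X-macro above is expanded once per entry of MBOX_UP_CGX_MESSAGES
 * (and MBOX_UP_MCS_MESSAGES), generating one switch case per message ID that
 * allocates the typed response, fills the common header and dispatches to
 * the matching otx2_mbox_up_handler_<name>() routine, e.g. the link event
 * message ends up in otx2_mbox_up_handler_cgx_link_event() defined earlier.
 */
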
static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_mbox *mbox = &af_mbox->mbox_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct otx2_nic *pf = af_mbox->pfvf;
	int offset, id, devid = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	u16 num_msgs;

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	num_msgs = rsp_hdr->num_msgs;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
		/* Skip processing VF's messages */
		if (!devid)
			otx2_process_mbox_msg_up(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Forward to VF iff VFs are really present */
	if (devid && pci_num_vf(pf->pdev)) {
		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
					  MBOX_DIR_PFVF_UP, devid - 1,
					  num_msgs);
		return;
	}

	otx2_mbox_msg_send(mbox, 0);
}

static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	struct mbox *mw = &pf->mbox;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 mbox_data;

	/* Clear the IRQ */
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));

	mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);

	if (mbox_data & MBOX_UP_MSG) {
		mbox_data &= ~MBOX_UP_MSG;
		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);

		mbox = &mw->mbox_up;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(pf->mbox_wq, &mw->mbox_up_wrk);

		trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
					 BIT_ULL(0));
	}

	if (mbox_data & MBOX_DOWN_MSG) {
		mbox_data &= ~MBOX_DOWN_MSG;
		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);

		mbox = &mw->mbox;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(pf->mbox_wq, &mw->mbox_wrk);

		trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
					 BIT_ULL(0));
	}

	return IRQ_HANDLED;
}

static void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);

	/* Disable AF => PF mailbox IRQ */
	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, pf);
}

static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from AF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_af)
		return 0;

	/* Check mailbox communication with AF */
	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
	if (!req) {
		otx2_disable_mbox_intr(pf);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		dev_warn(pf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2_disable_mbox_intr(pf);
		return -EPROBE_DEFER;
	}

	return 0;
}

static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;

	if (pf->mbox_wq) {
		destroy_workqueue(pf->mbox_wq);
		pf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}

static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = pf;
	pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!pf->mbox_wq)
		return -ENOMEM;

	/* Mailbox is a reserved memory (in RAM) region shared between
	 * admin function (i.e AF) and this PF, shouldn't be mapped as
	 * device memory to allow unaligned accesses.
	 */
	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
			    MBOX_SIZE);
	if (!hwbase) {
		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;
exit:
	otx2_pfaf_mbox_destroy(pf);
	return err;
}

static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
	else
		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);

	if (!msg) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);
	return err;
}

static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
{
	struct msg_req *msg;
	int err;

	if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
				    pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(pf->netdev,
			    "CGX/RPM internal loopback might not work as DMAC filters are active\n");

	mutex_lock(&pf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
	else
		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);

	if (!msg) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);
	return err;
}

int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues)
{
	int err;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}
EXPORT_SYMBOL(otx2_set_real_num_queues);

static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
	"NIX_SQOPERR_OOR",
	"NIX_SQOPERR_CTX_FAULT",
	"NIX_SQOPERR_CTX_POISON",
	"NIX_SQOPERR_DISABLED",
	"NIX_SQOPERR_SIZE_ERR",
	"NIX_SQOPERR_OFLOW",
	"NIX_SQOPERR_SQB_NULL",
	"NIX_SQOPERR_SQB_FAULT",
	"NIX_SQOPERR_SQE_SZ_ZERO",
};

static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
	"NIX_MNQERR_SQ_CTX_FAULT",
	"NIX_MNQERR_SQ_CTX_POISON",
	"NIX_MNQERR_SQB_FAULT",
	"NIX_MNQERR_SQB_POISON",
	"NIX_MNQERR_TOTAL_ERR",
	"NIX_MNQERR_LSO_ERR",
	"NIX_MNQERR_CQ_QUERY_ERR",
	"NIX_MNQERR_MAX_SQE_SIZE_ERR",
	"NIX_MNQERR_MAXLEN_ERR",
	"NIX_MNQERR_SQE_SIZEM1_ZERO",
};

static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
	[NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
	[NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
	[NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
	[NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
	[NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
	[NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
	[NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
	[NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
	[NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
	[NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
	[NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
	[NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
	[NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
	[NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
	[NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
	[NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
	[NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
	[NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
	[NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
	[NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
	[NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
	[NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
	[NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
	[NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
	[NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
	[NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
};

static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
	struct otx2_nic *pf = data;
	struct otx2_snd_queue *sq;
	u64 val, *ptr;
	u64 qidx = 0;

	/* CQ */
	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);

		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
			     (val & NIX_CQERRINT_BITS));
		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
			continue;

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev,
				   "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
		} else {
			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
					   qidx);
			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
				netdev_err(pf->netdev,
					   "CQ%lld: Memory fault on CQE write to LLC/DRAM",
					   qidx);
		}

		schedule_work(&pf->reset_task);
		return IRQ_HANDLED;
	}

	/* SQ */
	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
		u8 sq_op_err_code, mnq_err_code, snd_err_code;

		sq = &pf->qset.sq[qidx];
		if (!sq->sqb_ptrs)
			continue;

		/* The debug registers below capture the first error
		 * corresponding to those registers. We don't have to check
		 * against the SQ qid as these are fatal errors.
		 */

		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);
		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
			     (val & NIX_SQINT_BITS));

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev,
				   "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
			goto done;
		}

		sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
		if (!(sq_op_err_dbg & BIT(44)))
			goto chk_mnq_err_dbg;

		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
		netdev_err(pf->netdev,
			   "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
			   qidx, sq_op_err_dbg,
			   nix_sqoperr_e_str[sq_op_err_code],
			   sq_op_err_code);

		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));

		if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
			goto chk_mnq_err_dbg;

		/* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure.
		 * TODO: But we are in irq context. How to call mbox functions which sleep?
		 */

chk_mnq_err_dbg:
		mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
		if (!(mnq_err_dbg & BIT(44)))
			goto chk_snd_err_dbg;

		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
		netdev_err(pf->netdev,
			   "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n",
			   qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
			   mnq_err_code);
		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));

chk_snd_err_dbg:
		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
		if (snd_err_dbg & BIT(44)) {
			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
			netdev_err(pf->netdev,
				   "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
				   qidx, snd_err_dbg,
				   nix_snd_status_e_str[snd_err_code],
				   snd_err_code);
			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
		}

done:
		/* Print values and reset */
		if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
			netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
				   qidx);

		schedule_work(&pf->reset_task);
	}

	return IRQ_HANDLED;
}

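/* The (qidx << 44) additions in the handler above use the hardware's atomic
 * load-and-add interface: folding the queue index into the upper bits of the
 * value written to NIX_LF_CQ_OP_INT/NIX_LF_SQ_OP_INT selects whose interrupt
 * status the atomic read returns, so a single register pair services every
 * CQ and SQ of the LF.
 */
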
static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
{
	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
	int qidx = cq_poll->cint_idx;

	/* Disable interrupts.
	 *
	 * Completion interrupts behave in a level-triggered interrupt
	 * fashion, and hence have to be cleared only after it is serviced.
	 */
	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

	/* Schedule NAPI */
	pf->napi_events++;
	napi_schedule_irqoff(&cq_poll->napi);

	return IRQ_HANDLED;
}

static void otx2_disable_napi(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cancel_work_sync(&cq_poll->dim.work);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
}

static void otx2_free_cq_res(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_queue *cq;
	int qidx;

	/* Disable CQs */
	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		qmem_free(pf->dev, cq->cqe);
	}
}

static void otx2_free_sq_res(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_snd_queue *sq;
	int qidx;

	/* Disable SQs */
	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
	/* Free SQB pointers */
	otx2_sq_free_sqbs(pf);
	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
		sq = &qset->sq[qidx];
		/* Skip freeing Qos queues if they are not initialized */
		if (!sq->sqe)
			continue;
		qmem_free(pf->dev, sq->sqe);
		qmem_free(pf->dev, sq->tso_hdrs);
		kfree(sq->sg);
		kfree(sq->sqb_ptrs);
	}
}

static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
{
	int frame_size;
	int total_size;
	int rbuf_size;

	if (pf->hw.rbuf_len)
		return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;

	/* The data transferred by NIX to memory consists of actual packet
	 * plus additional data which has timestamp and/or EDSA/HIGIG2
	 * headers if interface is configured in corresponding modes.
	 * NIX transfers entire data using 6 segments/buffers and writes
	 * a CQE_RX descriptor with those segment addresses. First segment
	 * has additional data prepended to packet. Also software omits a
	 * headroom of 128 bytes in each segment. Hence the total size of
	 * memory needed to receive a packet with 'mtu' is:
	 * frame size = mtu + additional data;
	 * memory = frame_size + headroom * 6;
	 * each receive buffer size = memory / 6;
	 */
	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
	total_size = frame_size + OTX2_HEAD_ROOM * 6;
	rbuf_size = total_size / 6;

	return ALIGN(rbuf_size, 2048);
}

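/* Rough worked example for the sizing above (exact values depend on the
 * OTX2_* constants): with an MTU of 1500 and roughly 30 bytes of
 * Ethernet/VLAN plus timestamp overhead, frame_size is about 1530,
 * total_size adds six 128-byte headrooms for about 2298, and the resulting
 * per-segment share of about 383 bytes is rounded up to one 2048-byte
 * receive buffer.
 */
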
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	int err = 0, lvl;

	/* Set required NPA LF's pool counts
	 * Auras and Pools are used in a 1:1 mapping,
	 * so, aura count = pool count.
	 */
	hw->rqpool_cnt = hw->rx_queues;
	hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;

	/* Maximum hardware supported transmit length */
	pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;

	pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);

	mutex_lock(&mbox->lock);
	/* NPA init */
	err = otx2_config_npa(pf);
	if (err)
		goto exit;

	/* NIX init */
	err = otx2_config_nix(pf);
	if (err)
		goto err_free_npa_lf;

	/* Enable backpressure for CGX mapped PF/VFs */
	if (!is_otx2_lbkvf(pf->pdev))
		otx2_nix_config_bp(pf, true);

	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
	err = otx2_rq_aura_pool_init(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_nix_lf;
	}

	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
	err = otx2_sq_aura_pool_init(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_rq_ptrs;
	}

	err = otx2_txsch_alloc(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_sq_ptrs;
	}

#ifdef CONFIG_DCB
	if (pf->pfc_en) {
		err = otx2_pfc_txschq_alloc(pf);
		if (err) {
			mutex_unlock(&mbox->lock);
			goto err_free_sq_ptrs;
		}
	}
#endif

	err = otx2_config_nix_queues(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_txsch;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		err = otx2_txschq_config(pf, lvl, 0, false);
		if (err) {
			mutex_unlock(&mbox->lock);
			goto err_free_nix_queues;
		}
	}

#ifdef CONFIG_DCB
	if (pf->pfc_en) {
		err = otx2_pfc_txschq_config(pf);
		if (err) {
			mutex_unlock(&mbox->lock);
			goto err_free_nix_queues;
		}
	}
#endif

	mutex_unlock(&mbox->lock);
	return err;

err_free_nix_queues:
	otx2_free_sq_res(pf);
	otx2_free_cq_res(pf);
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
err_free_txsch:
	otx2_txschq_stop(pf);
err_free_sq_ptrs:
	otx2_sq_free_sqbs(pf);
err_free_rq_ptrs:
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);
err_free_nix_lf:
	mutex_lock(&mbox->lock);
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (free_req) {
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
err_free_npa_lf:
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
exit:
	mutex_unlock(&mbox->lock);
	return err;
}

static void otx2_free_hw_resources(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_cq_queue *cq;
	struct otx2_pool *pool;
	struct msg_req *req;
	int pool_id;
	int qidx;

	/* Ensure all SQE are processed */
	otx2_sqb_flush(pf);

	/* Stop transmission */
	otx2_txschq_stop(pf);

#ifdef CONFIG_DCB
	if (pf->pfc_en)
		otx2_pfc_txschq_stop(pf);
#endif

	otx2_clean_qos_queues(pf);

	mutex_lock(&mbox->lock);
	/* Disable backpressure */
	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_nix_config_bp(pf, false);
	mutex_unlock(&mbox->lock);

	/* Disable RQs */
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);

	/* Dequeue all CQEs */
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		if (cq->cq_type == CQ_RX)
			otx2_cleanup_rx_cqes(pf, cq, qidx);
		else
			otx2_cleanup_tx_cqes(pf, cq);
	}
	otx2_free_pending_sqe(pf);

	otx2_free_sq_res(pf);

	/* Free RQ buffer pointers */
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);

	for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
		pool = &pf->qset.pool[pool_id];
		page_pool_destroy(pool->page_pool);
		pool->page_pool = NULL;
	}

	otx2_free_cq_res(pf);

	/* Free all ingress bandwidth profiles allocated */
	cn10k_free_all_ipolicers(pf);

	mutex_lock(&mbox->lock);
	/* Reset NIX LF */
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (free_req) {
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
			free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
	mutex_unlock(&mbox->lock);

	/* Disable NPA Pool and Aura hw context */
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);

	mutex_lock(&mbox->lock);
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
	mutex_unlock(&mbox->lock);
}

static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
{
	int vf;

	/* The AF driver will determine whether to allow the VF netdev or not */
	if (is_otx2_vf(pfvf->pcifunc))
		return true;

	/* check if there are any trusted VFs associated with the PF netdev */
	for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
		if (pfvf->vf_configs[vf].trusted)
			return true;
	return false;
}

static void otx2_do_set_rx_mode(struct otx2_nic *pf)
{
	struct net_device *netdev = pf->netdev;
	struct nix_rx_mode *req;
	bool promisc = false;

	if (!(netdev->flags & IFF_UP))
		return;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
		promisc = true;
	}

	/* Write unicast address to mcam entries or del from mcam */
	if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
		__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return;
	}

	req->mode = NIX_RX_MODE_UCAST;

	if (promisc)
		req->mode |= NIX_RX_MODE_PROMISC;
	if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
		req->mode |= NIX_RX_MODE_ALLMULTI;

	if (otx2_promisc_use_mce_list(pf))
		req->mode |= NIX_RX_MODE_USE_MCE;

	otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);
}

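/* The promiscuous fallback above is deliberate: only OTX2_MAX_UNICAST_FLOWS
 * MCAM entries are reserved for unicast filters, so once the netdev's UC
 * list no longer fits in hardware the driver accepts all traffic and lets
 * the stack filter in software.
 */
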
static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
{
	int cint;

	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
		otx2_config_irq_coalescing(pfvf, cint);
}

static void otx2_dim_work(struct work_struct *w)
{
	struct dim_cq_moder cur_moder;
	struct otx2_cq_poll *cq_poll;
	struct otx2_nic *pfvf;
	struct dim *dim;

	dim = container_of(w, struct dim, work);
	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	cq_poll = container_of(dim, struct otx2_cq_poll, dim);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
		CQ_TIMER_THRESH_MAX : cur_moder.usec;
	pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
		NAPI_POLL_WEIGHT : cur_moder.pkts;
	otx2_set_irq_coalesce(pfvf);
	dim->state = DIM_START_MEASURE;
}

int otx2_open(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	int err = 0, qidx, vec;
	char *irq_name;

	netif_carrier_off(netdev);

	/* RQ and SQs are mapped to different CQs,
	 * so find out max CQ IRQs (i.e CINTs) needed.
	 */
	pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues;
	pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
			       pf->hw.tc_tx_queues);

	pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);

	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
	if (!qset->napi)
		return -ENOMEM;

	/* CQ size of RQ */
	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
	/* CQ size of SQ */
	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);

	err = -ENOMEM;
	qset->cq = kcalloc(pf->qset.cq_cnt,
			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
	if (!qset->cq)
		goto err_free_mem;

	qset->sq = kcalloc(otx2_get_total_tx_queues(pf),
			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
	if (!qset->sq)
		goto err_free_mem;

	qset->rq = kcalloc(pf->hw.rx_queues,
			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
	if (!qset->rq)
		goto err_free_mem;

	err = otx2_init_hw_resources(pf);
	if (err)
		goto err_free_mem;

	/* Register NAPI handler */
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cq_poll->cint_idx = qidx;
		/* RQ0 & SQ0 are mapped to CINT0 and so on..
		 * 'cq_ids[0]' points to RQ's CQ and
		 * 'cq_ids[1]' points to SQ's CQ and
		 * 'cq_ids[2]' points to XDP's CQ and
		 * 'cq_ids[3]' points to QOS's CQ.
		 */
		cq_poll->cq_ids[CQ_RX] =
			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
		if (pf->xdp_prog)
			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
						  (qidx + pf->hw.rx_queues +
						   pf->hw.tx_queues) :
						  CINT_INVALID_CQ;
		else
			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;

		cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
					  (qidx + pf->hw.rx_queues +
					   pf->hw.non_qos_queues) :
					  CINT_INVALID_CQ;

		cq_poll->dev = (void *)pf;
		cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
		netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
		napi_enable(&cq_poll->napi);
	}

	/* Set maximum frame size allowed in HW */
	err = otx2_hw_set_mtu(pf, netdev->mtu);
	if (err)
		goto err_disable_napi;

	/* Setup segmentation algorithms, if failed, clear offload capability */
	otx2_setup_segmentation(pf);

	/* Initialize RSS */
	err = otx2_rss_init(pf);
	if (err)
		goto err_disable_napi;

	/* Register Queue IRQ handlers */
	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);

	err = request_irq(pci_irq_vector(pf->pdev, vec),
			  otx2_q_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF%d: IRQ registration failed for QERR\n",
			rvu_get_pf(pf->pcifunc));
		goto err_disable_napi;
	}

	/* Enable QINT IRQ */
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));

	/* Register CQ IRQ handlers */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
			 qidx);

		err = request_irq(pci_irq_vector(pf->pdev, vec),
				  otx2_cq_intr_handler, 0, irq_name,
				  &qset->napi[qidx]);
		if (err) {
			dev_err(pf->dev,
				"RVUPF%d: IRQ registration failed for CQ%d\n",
				rvu_get_pf(pf->pcifunc), qidx);
			goto err_free_cints;
		}
		vec++;

		otx2_config_irq_coalescing(pf, qidx);

		/* Enable CQ IRQ */
		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
	}

	otx2_set_cints_affinity(pf);

	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		otx2_enable_rxvlan(pf, true);

	/* When reinitializing enable time stamping if it is enabled before */
	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
		pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
		otx2_config_hw_tx_tstamp(pf, true);
	}
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
		pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
		otx2_config_hw_rx_tstamp(pf, true);
	}

	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */
	smp_wmb();

	/* Enable QoS configuration before starting tx queues */
	otx2_qos_config_txschq(pf);

	/* we have already received link status notification */
	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_handle_link_event(pf);

	/* Install DMAC Filters */
	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
		otx2_dmacflt_reinstall_flows(pf);

	otx2_tc_apply_ingress_police_rules(pf);

	err = otx2_rxtx_enable(pf, true);
	/* If a mbox communication error happens at this point then interface
	 * will end up in a state such that it is in down state but hardware
	 * mcam entries are enabled to receive the packets. Hence disable the
	 * packet I/O.
	 */
	if (err == -EIO)
		goto err_disable_rxtx;
	else if (err)
		goto err_tx_stop_queues;

	otx2_do_set_rx_mode(pf);

	return 0;

err_disable_rxtx:
	otx2_rxtx_enable(pf, false);
err_tx_stop_queues:
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);
	pf->flags |= OTX2_FLAG_INTF_DOWN;
err_free_cints:
	otx2_free_cints(pf, qidx);
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	free_irq(vec, pf);
err_disable_napi:
	otx2_disable_napi(pf);
	otx2_free_hw_resources(pf);
err_free_mem:
	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);
	return err;
}
EXPORT_SYMBOL(otx2_open);

int otx2_stop(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	struct otx2_rss_info *rss;
	int qidx, vec, wrk;

	/* If the DOWN flag is set resources are already freed */
	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return 0;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	pf->flags |= OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */
	smp_wmb();

	/* First stop packet Rx/Tx */
	otx2_rxtx_enable(pf, false);

	/* Clear RSS enable flag */
	rss = &pf->hw.rss_info;
	rss->enable = false;
	if (!netif_is_rxfh_configured(netdev))
		kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);

	/* Cleanup Queue IRQ */
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	free_irq(vec, pf);

	/* Cleanup CQ NAPI and IRQ */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		synchronize_irq(pci_irq_vector(pf->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);
		vec++;
	}

	netif_tx_disable(netdev);

	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
	devm_kfree(pf->dev, pf->refill_wrk);

	otx2_free_hw_resources(pf);
	otx2_free_cints(pf, pf->hw.cint_cnt);
	otx2_disable_napi(pf);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);
	/* Do not clear RQ/SQ ringsize settings */
	memset_startat(qset, 0, sqe_cnt);
	return 0;
}
EXPORT_SYMBOL(otx2_stop);

static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;
	int sq_idx;

	/* XDP SQs are not mapped with TXQs
	 * advance qid to derive correct sq mapped with QOS
	 */
	sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;

	/* Check for minimum and maximum packet length */
	if (skb->len <= ETH_HLEN ||
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sq = &pf->qset.sq[sq_idx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

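/* The stop-then-recheck sequence in otx2_xmit() above is the usual lockless
 * TX pairing: after netif_tx_stop_queue() the memory barrier orders the stop
 * against the free-count read, so if the completion path freed SQBs in the
 * meantime the queue is woken immediately instead of stalling until the
 * next TX completion interrupt.
 */
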
static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb,
				     u16 htb_maj_id)
{
	u16 classid;

	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
		classid = TC_H_MIN(skb->priority);
	else
		classid = READ_ONCE(pf->qos.defcls);

	if (!classid)
		return 0;

	return otx2_get_txq_by_classid(pf, classid);
}

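/* skb->priority carries a TC classid encoded as major:minor, e.g. priority
 * 0x00010002 denotes class 1:2; the comparison above matches the HTB offload
 * root's major number and TC_H_MIN() extracts the leaf class, falling back
 * to the configured default class (qos.defcls) otherwise.
 */
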
u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	bool qos_enabled;
#ifdef CONFIG_DCB
	u8 vlan_prio;
#endif
	int txq;

	qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues;
	if (unlikely(qos_enabled)) {
		/* This smp_load_acquire() pairs with smp_store_release() in
		 * otx2_qos_root_add() called from htb offload root creation
		 */
		u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id);

		if (unlikely(htb_maj_id)) {
			txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id);
			if (txq > 0)
				return txq;
			goto process_pfc;
		}
	}

process_pfc:
#ifdef CONFIG_DCB
	if (!skb_vlan_tag_present(skb))
		goto pick_tx;

	vlan_prio = skb->vlan_tci >> 13;
	if ((vlan_prio > pf->hw.tx_queues - 1) ||
	    !pf->pfc_alloc_status[vlan_prio])
		goto pick_tx;

	return vlan_prio;

pick_tx:
#endif
	txq = netdev_pick_tx(netdev, skb, NULL);
	if (unlikely(qos_enabled))
		return txq % pf->hw.tx_queues;

	return txq;
}
EXPORT_SYMBOL(otx2_select_queue);

static netdev_features_t otx2_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_STAG_RX;
	else
		features &= ~NETIF_F_HW_VLAN_STAG_RX;

	return features;
}

static void otx2_set_rx_mode(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	queue_work(pf->otx2_wq, &pf->rx_mode_work);
}

static void otx2_rx_mode_wrk_handler(struct work_struct *work)
{
	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);

	otx2_do_set_rx_mode(pf);
}

static int otx2_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct otx2_nic *pf = netdev_priv(netdev);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return otx2_cgx_config_loopback(pf,
						features & NETIF_F_LOOPBACK);

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
		return otx2_enable_rxvlan(pf,
					  features & NETIF_F_HW_VLAN_CTAG_RX);

	return otx2_handle_ntuple_tc_features(netdev, features);
}

static void otx2_reset_task(struct work_struct *work)
{
	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);

	if (!netif_running(pf->netdev))
		return;

	rtnl_lock();
	otx2_stop(pf->netdev);
	pf->reset_count++;
	otx2_open(pf->netdev);
	netif_trans_update(pf->netdev);
	rtnl_unlock();
}

static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err;

	if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	mutex_unlock(&pfvf->mbox.lock);
	if (enable)
		pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
	return 0;
}

static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err;

	if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	mutex_unlock(&pfvf->mbox.lock);
	if (enable)
		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
	return 0;
}

int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config config;

	if (!pfvf->ptp)
		return -ENODEV;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
			pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;

		cancel_delayed_work(&pfvf->ptp->synctstamp_work);
		otx2_config_hw_tx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
			return -ERANGE;
		pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
		schedule_delayed_work(&pfvf->ptp->synctstamp_work,
				      msecs_to_jiffies(500));
		fallthrough;
	case HWTSTAMP_TX_ON:
		otx2_config_hw_tx_tstamp(pfvf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		otx2_config_hw_rx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		otx2_config_hw_rx_tstamp(pfvf, true);
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	memcpy(&pfvf->tstamp, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(otx2_config_hwtstamp);
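/* For illustration, a minimal user-space sequence that reaches the handler
 * above through the standard SIOCSHWTSTAMP ioctl (interface name and fd
 * are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);   // fd: any AF_INET socket
 */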
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config *cfg = &pfvf->tstamp;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return otx2_config_hwtstamp(netdev, req);
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, cfg,
				    sizeof(*cfg)) ? -EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_ioctl);
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->default_rule = 1;
	req->append = 1;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}
static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;
	int ret;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pf->total_vfs)
		return -EINVAL;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ether_addr_copy(config->mac, mac);

	ret = otx2_do_set_vf_mac(pf, vf, mac);
	if (ret == 0)
		dev_info(&pdev->dev,
			 "Load/Reload VF driver\n");

	return ret;
}
static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
			       __be16 proto)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct nix_vtag_config_rsp *vtag_rsp;
	struct npc_delete_flow_req *del_req;
	struct nix_vtag_config *vtag_req;
	struct npc_install_flow_req *req;
	struct otx2_vf_config *config;
	int err = 0;
	u32 idx;

	config = &pf->vf_configs[vf];

	if (!vlan && !config->vlan)
		return 0;

	mutex_lock(&pf->mbox.lock);

	/* free old tx vtag entry */
	if (config->vlan) {
		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
		if (!vtag_req) {
			err = -ENOMEM;
			goto out;
		}
		vtag_req->cfg_type = 0;
		vtag_req->tx.free_vtag0 = 1;
		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;

		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;
	}

	if (!vlan && config->vlan) {
		/* rx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
		del_req->entry =
			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;

		/* tx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
		del_req->entry =
			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);

		goto out;
	}

	/* rx */
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
	req->packet.vlan_tci = htons(vlan);
	req->mask.vlan_tci = htons(VLAN_VID_MASK);
	/* af fills the destination mac addr */
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	/* tx */
	vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!vtag_req) {
		err = -ENOMEM;
		goto out;
	}

	/* configure tx vtag params */
	vtag_req->vtag_size = VTAGSIZE_T4;
	vtag_req->cfg_type = 0; /* tx vlan cfg */
	vtag_req->tx.cfg_vtag0 = 1;
	vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
			(&pf->mbox.mbox, 0, &vtag_req->hdr);
	if (IS_ERR(vtag_rsp)) {
		err = PTR_ERR(vtag_rsp);
		goto out;
	}
	config->tx_vtag_idx = vtag_rsp->vtag0_idx;

	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	eth_zero_addr((u8 *)&req->mask.dmac);
	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.tx_chan_base;
	req->intf = NIX_INTF_TX;
	req->vf = vf + 1;
	req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
	req->vtag0_def = vtag_rsp->vtag0_idx;
	req->vtag0_op = VTAG_INSERT;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	config->vlan = vlan;
	mutex_unlock(&pf->mbox.lock);
	return err;
}
static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			    __be16 proto)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	/* qos is currently unsupported */
	if (vlan >= VLAN_N_VID || qos)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
		return -EOPNOTSUPP;

	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
}
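/* Example trigger (illustrative): assigning a port VLAN to VF 0 with
 *
 *	ip link set eth0 vf 0 vlan 100
 *
 * lands here with vlan = 100, qos = 0 and proto = ETH_P_8021Q, which then
 * installs the RX strip and TX insert rules in otx2_do_set_vf_vlan().
 */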
static int otx2_get_vf_config(struct net_device *netdev, int vf,
			      struct ifla_vf_info *ivi)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ivi->vf = vf;
	ether_addr_copy(ivi->mac, config->mac);
	ivi->vlan = config->vlan;
	ivi->trusted = config->trusted;

	return 0;
}
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
			    int qidx)
{
	struct page *page;
	u64 dma_addr;
	int err = 0;

	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
				     offset_in_page(xdpf->data), xdpf->len,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(pf->dev, dma_addr))
		return -ENOMEM;

	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
	if (!err) {
		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
		page = virt_to_page(xdpf->data);
		put_page(page);
		return -ENOMEM;
	}

	return 0;
}
static int otx2_xdp_xmit(struct net_device *netdev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = smp_processor_id();
	struct otx2_snd_queue *sq;
	int drops = 0, i;

	if (!netif_running(netdev))
		return -ENETDOWN;

	qidx += pf->hw.tx_queues;
	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;

	/* Abort xmit if the XDP TX queue is not set up */
	if (unlikely(!sq))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
		if (err)
			drops++;
	}

	return n - drops;
}
static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
{
	struct net_device *dev = pf->netdev;
	bool if_up = netif_running(pf->netdev);
	struct bpf_prog *old_prog;

	if (prog && dev->mtu > MAX_XDP_MTU) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
		return -EOPNOTSUPP;
	}

	if (if_up)
		otx2_stop(pf->netdev);

	old_prog = xchg(&pf->xdp_prog, prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	if (pf->xdp_prog)
		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);

	/* Network stack and XDP share the same RX queues.
	 * Use separate TX queues for XDP and the network stack.
	 */
	if (pf->xdp_prog) {
		pf->hw.xdp_queues = pf->hw.rx_queues;
		xdp_features_set_redirect_target(dev, false);
	} else {
		pf->hw.xdp_queues = 0;
		xdp_features_clear_redirect_target(dev);
	}

	if (if_up)
		otx2_open(pf->netdev);

	return 0;
}
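/* Note on the reference counting above: xchg() hands the single reference
 * the BPF core took on 'prog' over to pf->xdp_prog, and bpf_prog_add()
 * then takes rx_queues - 1 additional references so that every receive
 * queue can hold (and later drop) its own reference to the program.
 */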
static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return otx2_xdp_setup(pf, xdp->prog);
	default:
		return -EINVAL;
	}
}
static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
				   int req_perm)
{
	struct set_vf_perm *req;
	int rc;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Let AF reset VF permissions as sriov is disabled */
	if (req_perm == OTX2_RESET_VF_PERM) {
		req->flags |= RESET_VF_PERM;
	} else if (req_perm == OTX2_TRUSTED_VF) {
		if (pf->vf_configs[vf].trusted)
			req->flags |= VF_TRUSTED;
	}

	req->vf = vf;
	rc = otx2_sync_mbox_msg(&pf->mbox);
out:
	mutex_unlock(&pf->mbox.lock);
	return rc;
}
static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
				 bool enable)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	int rc;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	if (pf->vf_configs[vf].trusted == enable)
		return 0;

	pf->vf_configs[vf].trusted = enable;
	rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);

	if (rc) {
		pf->vf_configs[vf].trusted = !enable;
	} else {
		netdev_info(pf->netdev, "VF %d is %strusted\n",
			    vf, enable ? "" : "not ");
		otx2_set_rx_mode(netdev);
	}

	return rc;
}
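/* Example trigger (illustrative):
 *
 *	ip link set eth0 vf 0 trust on
 *
 * Trust gates privileged VF requests at the AF, e.g. enabling promiscuous
 * or all-multicast reception on the VF.
 */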
static const struct net_device_ops otx2_netdev_ops = {
	.ndo_open		= otx2_open,
	.ndo_stop		= otx2_stop,
	.ndo_start_xmit		= otx2_xmit,
	.ndo_select_queue	= otx2_select_queue,
	.ndo_fix_features	= otx2_fix_features,
	.ndo_set_mac_address	= otx2_set_mac_address,
	.ndo_change_mtu		= otx2_change_mtu,
	.ndo_set_rx_mode	= otx2_set_rx_mode,
	.ndo_set_features	= otx2_set_features,
	.ndo_tx_timeout		= otx2_tx_timeout,
	.ndo_get_stats64	= otx2_get_stats64,
	.ndo_eth_ioctl		= otx2_ioctl,
	.ndo_set_vf_mac		= otx2_set_vf_mac,
	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
	.ndo_get_vf_config	= otx2_get_vf_config,
	.ndo_bpf		= otx2_xdp,
	.ndo_xdp_xmit		= otx2_xdp_xmit,
	.ndo_setup_tc		= otx2_setup_tc,
	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
};
static int otx2_wq_init(struct otx2_nic *pf)
{
	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
	if (!pf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
	INIT_WORK(&pf->reset_task, otx2_reset_task);
	return 0;
}
static int otx2_check_pf_usable(struct otx2_nic *nic)
{
	u64 rev;

	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/* Check if AF has setup revision for RVUM block,
	 * otherwise this driver probe should be deferred
	 * until AF driver comes up.
	 */
	if (!rev) {
		dev_warn(nic->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}
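/* Returning -EPROBE_DEFER puts this device on the driver core's deferred
 * probe list; probe is retried later, once the AF (admin function) driver
 * has come up and programmed the RVUM block revision read above.
 */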
static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
	struct otx2_hw *hw = &pf->hw;
	int num_vec, err;

	/* NPA interrupts are not registered, so allocate only
	 * up to the NIX vector offset.
	 */
	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2_disable_mbox_intr(pf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2_register_mbox_intr(pf, false);
}
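/* Worked example (illustrative numbers): if the AF placed the NIX LF's
 * MSI-X vectors at nix_msixoff = 64 with max_queues = 8, then
 * num_vec = 64 + NIX_LF_CINT_VEC_START + 8, i.e. everything below the NIX
 * offset (mailbox etc.) plus NIX vectors up to one CQ interrupt per queue.
 */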
static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
{
	int i;

	pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
				      sizeof(struct otx2_vf_config),
				      GFP_KERNEL);
	if (!pf->vf_configs)
		return -ENOMEM;

	for (i = 0; i < pf->total_vfs; i++) {
		pf->vf_configs[i].pf = pf;
		pf->vf_configs[i].intf_down = true;
		pf->vf_configs[i].trusted = false;
		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
				  otx2_vf_link_event_task);
	}

	return 0;
}
static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
{
	int i;

	if (!pf->vf_configs)
		return;

	for (i = 0; i < pf->total_vfs; i++) {
		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
		otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
	}
}
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	int err, qcount, qos_txqs;
	struct net_device *netdev;
	struct otx2_nic *pf;
	struct otx2_hw *hw;
	int num_vec;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	/* Set number of queues */
	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);

	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pf = netdev_priv(netdev);
	pf->netdev = netdev;
	pf->pdev = pdev;
	pf->dev = dev;
	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
	pf->flags |= OTX2_FLAG_INTF_DOWN;

	hw = &pf->hw;
	hw->pdev = pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->non_qos_queues = qcount;
	hw->max_queues = qcount;
	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
	/* Use CQE of 128 byte descriptor size by default */
	hw->xqe_size = 128;

	num_vec = pci_msix_vec_count(pdev);
	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	/* Map CSRs */
	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!pf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = otx2_check_pf_usable(pf);
	if (err)
		goto err_free_netdev;

	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	otx2_setup_dev_hw_settings(pf);

	/* Init PF <=> AF mailbox stuff */
	err = otx2_pfaf_mbox_init(pf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2_register_mbox_intr(pf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this PF.
	 * NIX and NPA LFs are needed for this PF to function as a NIC.
	 */
	err = otx2_attach_npa_nix(pf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2_realloc_msix_vectors(pf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_lmtst_init(pf);
	if (err)
		goto err_detach_rsrc;

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	/* Don't check for error. Proceed without ptp */
	otx2_ptp_init(pf);

	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
	 * HW allocates buffer pointer from stack and uses it for DMA'ing
	 * ingress packet. In some scenarios HW can free back allocated buffer
	 * pointers to pool. This makes it impossible for SW to maintain a
	 * parallel list where physical addresses of buffer pointers (IOVAs)
	 * given to HW can be saved for later reference.
	 *
	 * So the only way to convert Rx packet's buffer address is to use
	 * IOMMU's iova_to_phys() handler which translates the address by
	 * walking through the translation tables.
	 */
	pf->iommu_domain = iommu_get_domain_for_dev(dev);

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			       NETIF_F_GSO_UDP_L4);
	netdev->features |= netdev->hw_features;

	err = otx2_mcam_flow_init(pf);
	if (err)
		goto err_ptp_destroy;

	err = cn10k_mcs_init(pf);
	if (err)
		goto err_del_mcam_entries;

	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_STAG_RX;
	netdev->features |= netdev->hw_features;

	/* HW supports tc offload but mutually exclusive with n-tuple filters */
	if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_TC;

	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

	netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2_netdev_ops;
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(pf);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_mcs_free;
	}

	err = otx2_wq_init(pf);
	if (err)
		goto err_unreg_netdev;

	otx2_set_ethtool_ops(netdev);

	err = otx2_init_tc(pf);
	if (err)
		goto err_mcam_flow_del;

	err = otx2_register_dl(pf);
	if (err)
		goto err_mcam_flow_del;

	/* Initialize SR-IOV resources */
	err = otx2_sriov_vfcfg_init(pf);
	if (err)
		goto err_pf_sriov_init;

	/* Enable link notifications */
	otx2_cgx_config_linkevents(pf, true);

#ifdef CONFIG_DCB
	err = otx2_dcbnl_set_ops(netdev);
	if (err)
		goto err_pf_sriov_init;
#endif

	otx2_qos_init(pf, qos_txqs);

	return 0;

err_pf_sriov_init:
	otx2_shutdown_tc(pf);
err_mcam_flow_del:
	otx2_mcam_flow_del(pf);
err_unreg_netdev:
	unregister_netdev(netdev);
err_mcs_free:
	cn10k_mcs_free(pf);
err_del_mcam_entries:
	otx2_mcam_flow_del(pf);
err_ptp_destroy:
	otx2_ptp_destroy(pf);
err_detach_rsrc:
	if (pf->hw.lmt_info)
		free_percpu(pf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
		qmem_free(pf->dev, pf->dync_lmt);
	otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
	otx2_disable_mbox_intr(pf);
err_mbox_destroy:
	otx2_pfaf_mbox_destroy(pf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void otx2_vf_link_event_task(struct work_struct *work)
{
	struct otx2_vf_config *config;
	struct cgx_link_info_msg *req;
	struct mbox_msghdr *msghdr;
	struct delayed_work *dwork;
	struct otx2_nic *pf;
	int vf_idx;

	config = container_of(work, struct otx2_vf_config,
			      link_event_work.work);
	vf_idx = config - config->pf->vf_configs;
	pf = config->pf;

	mutex_lock(&pf->mbox.lock);

	dwork = &config->link_event_work;

	if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
		schedule_delayed_work(dwork, msecs_to_jiffies(100));
		mutex_unlock(&pf->mbox.lock);
		return;
	}

	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
					 sizeof(*req), sizeof(struct msg_rsp));
	if (!msghdr) {
		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
		mutex_unlock(&pf->mbox.lock);
		return;
	}

	req = (struct cgx_link_info_msg *)msghdr;
	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));

	otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);

	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);

	mutex_unlock(&pf->mbox.lock);
}
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int ret;

	/* Init PF <=> VF mailbox stuff */
	ret = otx2_pfvf_mbox_init(pf, numvfs);
	if (ret)
		return ret;

	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
	if (ret)
		goto free_mbox;

	ret = otx2_pf_flr_init(pf, numvfs);
	if (ret)
		goto free_intr;

	ret = otx2_register_flr_me_intr(pf, numvfs);
	if (ret)
		goto free_flr;

	ret = pci_enable_sriov(pdev, numvfs);
	if (ret)
		goto free_flr_intr;

	return numvfs;
free_flr_intr:
	otx2_disable_flr_me_intr(pf);
free_flr:
	otx2_flr_wq_destroy(pf);
free_intr:
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
	otx2_pfvf_mbox_destroy(pf);
	return ret;
}
static int otx2_sriov_disable(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int numvfs = pci_num_vf(pdev);

	if (!numvfs)
		return 0;

	pci_disable_sriov(pdev);

	otx2_disable_flr_me_intr(pf);
	otx2_flr_wq_destroy(pf);
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
	otx2_pfvf_mbox_destroy(pf);

	return 0;
}
static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	if (numvfs == 0)
		return otx2_sriov_disable(pdev);
	else
		return otx2_sriov_enable(pdev, numvfs);
}
static void otx2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf;

	if (!netdev)
		return;

	pf = netdev_priv(netdev);

	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
		otx2_config_hw_tx_tstamp(pf, false);
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
		otx2_config_hw_rx_tstamp(pf, false);

	/* Disable 802.3x pause frames */
	if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
	    (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
		pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
		pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
		otx2_config_pause_frm(pf);
	}

#ifdef CONFIG_DCB
	/* Disable PFC config */
	if (pf->pfc_en) {
		pf->pfc_en = 0;
		otx2_config_priority_flow_ctrl(pf);
	}
#endif
	cancel_work_sync(&pf->reset_task);
	/* Disable link notifications */
	otx2_cgx_config_linkevents(pf, false);

	otx2_unregister_dl(pf);
	unregister_netdev(netdev);
	cn10k_mcs_free(pf);
	otx2_sriov_disable(pf->pdev);
	otx2_sriov_vfcfg_cleanup(pf);
	if (pf->otx2_wq)
		destroy_workqueue(pf->otx2_wq);

	otx2_ptp_destroy(pf);
	otx2_mcam_flow_del(pf);
	otx2_shutdown_tc(pf);
	otx2_shutdown_qos(pf);
	otx2_detach_resources(&pf->mbox);
	if (pf->hw.lmt_info)
		free_percpu(pf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
		qmem_free(pf->dev, pf->dync_lmt);
	otx2_disable_mbox_intr(pf);
	otx2_pfaf_mbox_destroy(pf);
	pci_free_irq_vectors(pf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}
static struct pci_driver otx2_pf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_pf_id_table,
	.probe = otx2_probe,
	.shutdown = otx2_remove,
	.remove = otx2_remove,
	.sriov_configure = otx2_sriov_configure
};

static int __init otx2_rvupf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2_pf_driver);
}

static void __exit otx2_rvupf_cleanup_module(void)
{
	pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);