]>
Commit | Line | Data |
---|---|---|
3184fb5b TD |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Marvell OcteonTx2 RVU Virtual Function ethernet driver */ | |
3 | ||
4 | #include <linux/etherdevice.h> | |
5 | #include <linux/module.h> | |
6 | #include <linux/pci.h> | |
7 | ||
8 | #include "otx2_common.h" | |
9 | #include "otx2_reg.h" | |
10 | ||
11 | #define DRV_NAME "octeontx2-nicvf" | |
12 | #define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver" | |
13 | ||
/* PCI IDs this driver binds to: AF-provisioned VFs and regular RVU VFs */
static const struct pci_device_id otx2_vf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
	{ }
};
19 | ||
20 | MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>"); | |
21 | MODULE_DESCRIPTION(DRV_STRING); | |
22 | MODULE_LICENSE("GPL v2"); | |
23 | MODULE_DEVICE_TABLE(pci, otx2_vf_id_table); | |
24 | ||
/* RVU VF Interrupt Vector Enumeration */
enum {
	RVU_VF_INT_VEC_MBOX = 0x0,	/* PF/AF => VF mailbox interrupt */
};
29 | ||
/* Dispatch a single PF/AF => VF mailbox response message.
 *
 * The message header is validated first (known ID, response signature,
 * request accepted by PF/AF) and the message is dropped with an error
 * log on any failure.  MBOX_MSG_READY delivers the PCI function number
 * assigned to this VF; the remaining IDs are resource-allocation
 * responses forwarded to the shared mbox_handler_*() helpers.
 */
static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
					 struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(vf->dev,
			"Mbox msg with unknown ID %d\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(vf->dev,
			"Mbox msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}

	if (msg->rc == MBOX_MSG_INVALID) {
		dev_err(vf->dev,
			"PF/AF says the sent msg(s) %d were invalid\n",
			msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		/* READY response carries this VF's pcifunc from the AF */
		vf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(vf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
		break;
	default:
		/* No dedicated handler: only log if PF/AF reported an error */
		if (msg->rc)
			dev_err(vf->dev,
				"Mbox msg response has err %d, ID %d\n",
				msg->rc, msg->id);
	}
}
80 | ||
/* Workqueue handler for PF/AF => VF responses (mbox->mbox_wrk).
 *
 * Walks every message queued in the mailbox receive region — the count
 * was snapshotted into af_mbox->num_msgs by the IRQ handler — passes
 * each to otx2vf_process_vfaf_mbox_msg(), then resets the mailbox so
 * the next exchange starts clean.
 */
static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (af_mbox->num_msgs == 0)
		return;
	/* First message sits right after the (aligned) mailbox header */
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
		/* next_msgoff is an offset relative to rx_start */
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}

	otx2_mbox_reset(mbox, 0);
}
107 | ||
108 | static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf, | |
109 | struct mbox_msghdr *req) | |
110 | { | |
111 | struct msg_rsp *rsp; | |
112 | int err; | |
113 | ||
114 | /* Check if valid, if not reply with a invalid msg */ | |
115 | if (req->sig != OTX2_MBOX_REQ_SIG) { | |
116 | otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id); | |
117 | return -ENODEV; | |
118 | } | |
119 | ||
120 | switch (req->id) { | |
121 | case MBOX_MSG_CGX_LINK_EVENT: | |
122 | rsp = (struct msg_rsp *)otx2_mbox_alloc_msg( | |
123 | &vf->mbox.mbox_up, 0, | |
124 | sizeof(struct msg_rsp)); | |
125 | if (!rsp) | |
126 | return -ENOMEM; | |
127 | ||
128 | rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT; | |
129 | rsp->hdr.sig = OTX2_MBOX_RSP_SIG; | |
130 | rsp->hdr.pcifunc = 0; | |
131 | rsp->hdr.rc = 0; | |
132 | err = otx2_mbox_up_handler_cgx_link_event( | |
133 | vf, (struct cgx_link_info_msg *)req, rsp); | |
134 | return err; | |
135 | default: | |
136 | otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id); | |
137 | return -ENODEV; | |
138 | } | |
139 | return 0; | |
140 | } | |
141 | ||
/* Workqueue handler for PF => VF notifications (mbox->mbox_up_wrk).
 *
 * Iterates the up-mailbox receive region (count snapshotted into
 * vf_mbox->up_num_msgs by the IRQ handler), processes each notification
 * and finally triggers transmission of the queued responses back to the
 * PF via otx2_mbox_msg_send().
 */
static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *vf;
	int offset, id;

	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	vf = vf_mbox->pfvf;
	mbox = &vf_mbox->mbox_up;
	mdev = &mbox->dev[0];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (vf_mbox->up_num_msgs == 0)
		return;

	/* Messages begin after the aligned mailbox header */
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_mbox_msg_up(vf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_msg_send(mbox, 0);
}
171 | ||
/* Mailbox interrupt handler: fired when the PF/AF has written messages
 * for this VF.
 *
 * The hard-IRQ path only acknowledges the interrupt, snapshots how many
 * messages are pending in each direction (responses vs notifications)
 * and defers actual processing to the mbox workqueue.  The pending
 * counters in the shared header are cleared here so a re-fired IRQ does
 * not double-queue the same messages.
 */
static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;

	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));

	/* Read latest mbox data */
	smp_rmb();

	/* Check for PF => VF response messages */
	mbox = &vf->mbox.mbox;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		/* Snapshot count for the work handler, then clear the
		 * shared header so the PF side sees it consumed.
		 */
		vf->mbox.num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
	}
	/* Check for PF => VF notification messages */
	mbox = &vf->mbox.mbox_up;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.up_num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
	}

	return IRQ_HANDLED;
}
214 | ||
215 | static void otx2vf_disable_mbox_intr(struct otx2_nic *vf) | |
216 | { | |
217 | int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX); | |
218 | ||
219 | /* Disable VF => PF mailbox IRQ */ | |
220 | otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0)); | |
221 | free_irq(vector, vf); | |
222 | } | |
223 | ||
/* Register and enable the VF <=> PF mailbox interrupt.
 *
 * @vf:       driver private data
 * @probe_pf: true only on first-time probe; additionally sends a READY
 *            message to verify the AF responds, returning -EPROBE_DEFER
 *            on timeout so probing is retried later.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
	struct otx2_hw *hw = &vf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
	err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
			  otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
	if (err) {
		dev_err(vf->dev,
			"RVUPF: IRQ registration failed for VFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from PF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_pf)
		return 0;

	/* Check mailbox communication with PF */
	req = otx2_mbox_alloc_msg_ready(&vf->mbox);
	if (!req) {
		otx2vf_disable_mbox_intr(vf);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&vf->mbox);
	if (err) {
		dev_warn(vf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2vf_disable_mbox_intr(vf);
		return -EPROBE_DEFER;
	}
	return 0;
}
267 | ||
268 | static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf) | |
269 | { | |
270 | struct mbox *mbox = &vf->mbox; | |
271 | ||
272 | if (vf->mbox_wq) { | |
273 | flush_workqueue(vf->mbox_wq); | |
274 | destroy_workqueue(vf->mbox_wq); | |
275 | vf->mbox_wq = NULL; | |
276 | } | |
277 | ||
278 | if (mbox->mbox.hwbase) | |
279 | iounmap((void __iomem *)mbox->mbox.hwbase); | |
280 | ||
281 | otx2_mbox_destroy(&mbox->mbox); | |
282 | otx2_mbox_destroy(&mbox->mbox_up); | |
283 | } | |
284 | ||
285 | static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf) | |
286 | { | |
287 | struct mbox *mbox = &vf->mbox; | |
288 | void __iomem *hwbase; | |
289 | int err; | |
290 | ||
291 | mbox->pfvf = vf; | |
292 | vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox", | |
293 | WQ_UNBOUND | WQ_HIGHPRI | | |
294 | WQ_MEM_RECLAIM, 1); | |
295 | if (!vf->mbox_wq) | |
296 | return -ENOMEM; | |
297 | ||
298 | /* Mailbox is a reserved memory (in RAM) region shared between | |
299 | * admin function (i.e PF0) and this VF, shouldn't be mapped as | |
300 | * device memory to allow unaligned accesses. | |
301 | */ | |
302 | hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM), | |
303 | pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM)); | |
304 | if (!hwbase) { | |
305 | dev_err(vf->dev, "Unable to map VFAF mailbox region\n"); | |
306 | err = -ENOMEM; | |
307 | goto exit; | |
308 | } | |
309 | ||
310 | err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base, | |
311 | MBOX_DIR_VFPF, 1); | |
312 | if (err) | |
313 | goto exit; | |
314 | ||
315 | err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base, | |
316 | MBOX_DIR_VFPF_UP, 1); | |
317 | if (err) | |
318 | goto exit; | |
319 | ||
320 | err = otx2_mbox_bbuf_init(mbox, vf->pdev); | |
321 | if (err) | |
322 | goto exit; | |
323 | ||
324 | INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler); | |
325 | INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler); | |
4c3212f5 | 326 | mutex_init(&mbox->lock); |
3184fb5b TD |
327 | |
328 | return 0; | |
329 | exit: | |
330 | destroy_workqueue(vf->mbox_wq); | |
331 | return err; | |
332 | } | |
333 | ||
334 | static int otx2vf_open(struct net_device *netdev) | |
335 | { | |
336 | struct otx2_nic *vf; | |
337 | int err; | |
338 | ||
339 | err = otx2_open(netdev); | |
340 | if (err) | |
341 | return err; | |
342 | ||
343 | /* LBKs do not receive link events so tell everyone we are up here */ | |
344 | vf = netdev_priv(netdev); | |
345 | if (is_otx2_lbkvf(vf->pdev)) { | |
346 | pr_info("%s NIC Link is UP\n", netdev->name); | |
347 | netif_carrier_on(netdev); | |
348 | netif_tx_start_all_queues(netdev); | |
349 | } | |
350 | ||
351 | return 0; | |
352 | } | |
353 | ||
/* ndo_stop: all teardown lives in the common stop path */
static int otx2vf_stop(struct net_device *netdev)
{
	return otx2_stop(netdev);
}
358 | ||
/* ndo_start_xmit: append the skb to the send queue chosen by the skb's
 * queue mapping.
 *
 * If the SQ is out of descriptors the TX queue is stopped and
 * NETDEV_TX_BUSY is returned so the stack requeues the skb.  After
 * stopping, the free-count is rechecked (with a full barrier against a
 * concurrent completion freeing SQBs) and the queue is re-woken if
 * space appeared — avoiding a lost-wakeup stall.
 */
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &vf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, incase SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
383 | ||
384 | static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu) | |
385 | { | |
386 | bool if_up = netif_running(netdev); | |
387 | int err = 0; | |
388 | ||
389 | if (if_up) | |
390 | otx2vf_stop(netdev); | |
391 | ||
392 | netdev_info(netdev, "Changing MTU from %d to %d\n", | |
393 | netdev->mtu, new_mtu); | |
394 | netdev->mtu = new_mtu; | |
395 | ||
396 | if (if_up) | |
397 | err = otx2vf_open(netdev); | |
398 | ||
399 | return err; | |
400 | } | |
401 | ||
/* Reset worker (vf->reset_task): restart the interface under RTNL.
 *
 * Holding rtnl_lock serializes against concurrent ndo_open/ndo_stop
 * from the stack; the restart is only performed if the netdev is still
 * running.
 */
static void otx2vf_reset_task(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);

	rtnl_lock();

	if (netif_running(vf->netdev)) {
		otx2vf_stop(vf->netdev);
		vf->reset_count++;
		otx2vf_open(vf->netdev);
	}

	rtnl_unlock();
}
416 | ||
/* VF netdev callbacks; ops without a VF-specific wrapper use the
 * implementations shared with the PF driver (otx2_*).
 */
static const struct net_device_ops otx2vf_netdev_ops = {
	.ndo_open = otx2vf_open,
	.ndo_stop = otx2vf_stop,
	.ndo_start_xmit = otx2vf_xmit,
	.ndo_set_mac_address = otx2_set_mac_address,
	.ndo_change_mtu = otx2vf_change_mtu,
	.ndo_get_stats64 = otx2_get_stats64,
	.ndo_tx_timeout = otx2_tx_timeout,
};
426 | ||
427 | static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf) | |
428 | { | |
429 | struct otx2_hw *hw = &vf->hw; | |
430 | int num_vec, err; | |
431 | ||
432 | num_vec = hw->nix_msixoff; | |
433 | num_vec += NIX_LF_CINT_VEC_START + hw->max_queues; | |
434 | ||
435 | otx2vf_disable_mbox_intr(vf); | |
436 | pci_free_irq_vectors(hw->pdev); | |
437 | err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); | |
438 | if (err < 0) { | |
439 | dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n", | |
440 | __func__, num_vec); | |
441 | return err; | |
442 | } | |
443 | ||
444 | return otx2vf_register_mbox_intr(vf, false); | |
445 | } | |
446 | ||
447 | static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
448 | { | |
449 | int num_vec = pci_msix_vec_count(pdev); | |
450 | struct device *dev = &pdev->dev; | |
451 | struct net_device *netdev; | |
452 | struct otx2_nic *vf; | |
453 | struct otx2_hw *hw; | |
454 | int err, qcount; | |
455 | ||
456 | err = pcim_enable_device(pdev); | |
457 | if (err) { | |
458 | dev_err(dev, "Failed to enable PCI device\n"); | |
459 | return err; | |
460 | } | |
461 | ||
462 | err = pci_request_regions(pdev, DRV_NAME); | |
463 | if (err) { | |
464 | dev_err(dev, "PCI request regions failed 0x%x\n", err); | |
465 | return err; | |
466 | } | |
467 | ||
468 | err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); | |
469 | if (err) { | |
470 | dev_err(dev, "DMA mask config failed, abort\n"); | |
471 | goto err_release_regions; | |
472 | } | |
473 | ||
474 | pci_set_master(pdev); | |
475 | ||
476 | qcount = num_online_cpus(); | |
477 | netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount); | |
478 | if (!netdev) { | |
479 | err = -ENOMEM; | |
480 | goto err_release_regions; | |
481 | } | |
482 | ||
483 | pci_set_drvdata(pdev, netdev); | |
484 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
485 | vf = netdev_priv(netdev); | |
486 | vf->netdev = netdev; | |
487 | vf->pdev = pdev; | |
488 | vf->dev = dev; | |
489 | vf->iommu_domain = iommu_get_domain_for_dev(dev); | |
490 | ||
491 | vf->flags |= OTX2_FLAG_INTF_DOWN; | |
492 | hw = &vf->hw; | |
493 | hw->pdev = vf->pdev; | |
494 | hw->rx_queues = qcount; | |
495 | hw->tx_queues = qcount; | |
496 | hw->max_queues = qcount; | |
497 | ||
498 | hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, | |
499 | GFP_KERNEL); | |
9302bead WY |
500 | if (!hw->irq_name) { |
501 | err = -ENOMEM; | |
3184fb5b | 502 | goto err_free_netdev; |
9302bead | 503 | } |
3184fb5b TD |
504 | |
505 | hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, | |
506 | sizeof(cpumask_var_t), GFP_KERNEL); | |
9302bead WY |
507 | if (!hw->affinity_mask) { |
508 | err = -ENOMEM; | |
3184fb5b | 509 | goto err_free_netdev; |
9302bead | 510 | } |
3184fb5b TD |
511 | |
512 | err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); | |
513 | if (err < 0) { | |
514 | dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", | |
515 | __func__, num_vec); | |
516 | goto err_free_netdev; | |
517 | } | |
518 | ||
519 | vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); | |
520 | if (!vf->reg_base) { | |
521 | dev_err(dev, "Unable to map physical function CSRs, aborting\n"); | |
522 | err = -ENOMEM; | |
523 | goto err_free_irq_vectors; | |
524 | } | |
525 | ||
526 | /* Init VF <=> PF mailbox stuff */ | |
527 | err = otx2vf_vfaf_mbox_init(vf); | |
528 | if (err) | |
529 | goto err_free_irq_vectors; | |
530 | ||
531 | /* Register mailbox interrupt */ | |
532 | err = otx2vf_register_mbox_intr(vf, true); | |
533 | if (err) | |
534 | goto err_mbox_destroy; | |
535 | ||
536 | /* Request AF to attach NPA and LIX LFs to this AF */ | |
537 | err = otx2_attach_npa_nix(vf); | |
538 | if (err) | |
539 | goto err_disable_mbox_intr; | |
540 | ||
541 | err = otx2vf_realloc_msix_vectors(vf); | |
542 | if (err) | |
543 | goto err_mbox_destroy; | |
544 | ||
545 | err = otx2_set_real_num_queues(netdev, qcount, qcount); | |
546 | if (err) | |
547 | goto err_detach_rsrc; | |
548 | ||
549 | otx2_setup_dev_hw_settings(vf); | |
550 | ||
551 | /* Assign default mac address */ | |
552 | otx2_get_mac_from_af(netdev); | |
553 | ||
554 | netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | | |
555 | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | | |
556 | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; | |
557 | netdev->features = netdev->hw_features; | |
558 | ||
559 | netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; | |
560 | netdev->watchdog_timeo = OTX2_TX_TIMEOUT; | |
561 | ||
562 | netdev->netdev_ops = &otx2vf_netdev_ops; | |
563 | ||
564 | /* MTU range: 68 - 9190 */ | |
565 | netdev->min_mtu = OTX2_MIN_MTU; | |
566 | netdev->max_mtu = OTX2_MAX_MTU; | |
567 | ||
568 | INIT_WORK(&vf->reset_task, otx2vf_reset_task); | |
569 | ||
570 | /* To distinguish, for LBK VFs set netdev name explicitly */ | |
571 | if (is_otx2_lbkvf(vf->pdev)) { | |
572 | int n; | |
573 | ||
574 | n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK; | |
575 | /* Need to subtract 1 to get proper VF number */ | |
576 | n -= 1; | |
577 | snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n); | |
578 | } | |
579 | ||
580 | err = register_netdev(netdev); | |
581 | if (err) { | |
582 | dev_err(dev, "Failed to register netdevice\n"); | |
583 | goto err_detach_rsrc; | |
584 | } | |
585 | ||
05c22b54 TD |
586 | otx2vf_set_ethtool_ops(netdev); |
587 | ||
3184fb5b TD |
588 | /* Enable pause frames by default */ |
589 | vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; | |
590 | vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; | |
591 | ||
592 | return 0; | |
593 | ||
594 | err_detach_rsrc: | |
595 | otx2_detach_resources(&vf->mbox); | |
596 | err_disable_mbox_intr: | |
597 | otx2vf_disable_mbox_intr(vf); | |
598 | err_mbox_destroy: | |
599 | otx2vf_vfaf_mbox_destroy(vf); | |
600 | err_free_irq_vectors: | |
601 | pci_free_irq_vectors(hw->pdev); | |
602 | err_free_netdev: | |
603 | pci_set_drvdata(pdev, NULL); | |
604 | free_netdev(netdev); | |
605 | err_release_regions: | |
606 | pci_release_regions(pdev); | |
607 | return err; | |
608 | } | |
609 | ||
610 | static void otx2vf_remove(struct pci_dev *pdev) | |
611 | { | |
612 | struct net_device *netdev = pci_get_drvdata(pdev); | |
613 | struct otx2_nic *vf; | |
614 | ||
615 | if (!netdev) | |
616 | return; | |
617 | ||
618 | vf = netdev_priv(netdev); | |
619 | ||
620 | otx2vf_disable_mbox_intr(vf); | |
621 | ||
622 | otx2_detach_resources(&vf->mbox); | |
623 | otx2vf_vfaf_mbox_destroy(vf); | |
624 | pci_free_irq_vectors(vf->pdev); | |
625 | pci_set_drvdata(pdev, NULL); | |
626 | free_netdev(netdev); | |
627 | ||
628 | pci_release_regions(pdev); | |
629 | } | |
630 | ||
/* PCI driver descriptor; .shutdown reuses remove() to fully quiesce the
 * device (mbox IRQ off, LFs detached) before kexec/power-off.
 */
static struct pci_driver otx2vf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_vf_id_table,
	.probe = otx2vf_probe,
	.remove = otx2vf_remove,
	.shutdown = otx2vf_remove,
};
638 | ||
639 | static int __init otx2vf_init_module(void) | |
640 | { | |
641 | pr_info("%s: %s\n", DRV_NAME, DRV_STRING); | |
642 | ||
643 | return pci_register_driver(&otx2vf_driver); | |
644 | } | |
645 | ||
646 | static void __exit otx2vf_cleanup_module(void) | |
647 | { | |
648 | pci_unregister_driver(&otx2vf_driver); | |
649 | } | |
650 | ||
651 | module_init(otx2vf_init_module); | |
652 | module_exit(otx2vf_cleanup_module); |