/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
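/* Forward an async event completion to a VF so the VF driver sees PF-driven
 * events such as link state changes.  A NULL vf broadcasts the event to all
 * VFs via the 0xffff target id.
 */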
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                          struct bnxt_vf_info *vf, u16 event_id)
{
        struct hwrm_fwd_async_event_cmpl_input req = {0};
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
        if (vf)
                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
                /* broadcast this async event to all VFs */
                req.encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
                           rc);
        return rc;
}

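/* Common sanity checks for the VF ndo callbacks below: the PF must be up,
 * SR-IOV must be enabled, and vf_id must be within the active VF range.
 */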
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                netdev_err(bp->dev, "vf ndo called while PF is down\n");
                return -EINVAL;
        }
        if (!bp->pf.active_vfs) {
                netdev_err(bp->dev, "vf ndo called while sriov is disabled\n");
                return -EINVAL;
        }
        if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
        return 0;
}

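/* .ndo_set_vf_spoofchk handler: toggle the firmware source MAC address
 * check for the VF, then mirror the result in the locally cached flags.
 */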
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        bool old_setting = false;
        u32 func_flags;
        int rc;

        if (bp->hwrm_spec_code < 0x10701)
                return -ENOTSUPP;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        if (vf->flags & BNXT_VF_SPOOFCHK)
                old_setting = true;
        if (old_setting == setting)
                return 0;

        func_flags = vf->func_flags;
        if (setting)
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /* TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->func_flags = func_flags;
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
                        vf->flags &= ~BNXT_VF_SPOOFCHK;
        }
        return rc;
}

static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_qcfg_input req = {0};
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
        }
        vf->func_qcfg_flags = le16_to_cpu(resp->flags);
        mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
}

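/* When firmware reports the trusted-VF capability, it is the authority on
 * trust state and is queried here; otherwise the PF falls back to the
 * locally tracked BNXT_VF_TRUST flag.
 */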
static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return !!(vf->flags & BNXT_VF_TRUST);

        bnxt_hwrm_func_qcfg_flags(bp, vf);
        return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_cfg_input req = {0};

        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        if (vf->flags & BNXT_VF_TRUST)
                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
        else
                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;

        if (bnxt_vf_ndo_prep(bp, vf_id))
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        if (trusted)
                vf->flags |= BNXT_VF_TRUST;
        else
                vf->flags &= ~BNXT_VF_TRUST;

        bnxt_hwrm_set_trusted_vf(bp, vf);
        return 0;
}

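/* .ndo_get_vf_config handler: report the MAC, VLAN/QoS, rate limits,
 * spoofchk, trust, and link state of the VF from the PF's cached state.
 */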
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
                       struct ifla_vf_info *ivi)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        ivi->vf = vf_id;
        vf = &bp->pf.vf[vf_id];

        if (is_valid_ether_addr(vf->mac_addr))
                memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
        else
                memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
        ivi->max_tx_rate = vf->max_tx_rate;
        ivi->min_tx_rate = vf->min_tx_rate;
        ivi->vlan = vf->vlan;
        if (vf->flags & BNXT_VF_QOS)
                ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
        else
                ivi->qos = 0;
        ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
        ivi->trusted = bnxt_is_trusted_vf(bp, vf);
        if (!(vf->flags & BNXT_VF_LINK_FORCED))
                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
        else if (vf->flags & BNXT_VF_LINK_UP)
                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
        else
                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

        return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
        /* reject broadcast or multicast MAC addresses; a zero MAC address
         * means the VF is allowed to use its own MAC address
         */
        if (is_multicast_ether_addr(mac)) {
                netdev_err(dev, "Invalid VF ethernet address\n");
                return -EINVAL;
        }
        vf = &bp->pf.vf[vf_id];

        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

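/* .ndo_set_vf_vlan handler: program a default VLAN for the VF.  Only 802.1Q
 * tags are accepted, and a nonzero user priority (qos) is rejected for now
 * per the TODO below.
 */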
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
                     __be16 vlan_proto)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u16 vlan_tag;
        int rc;

        if (bp->hwrm_spec_code < 0x10201)
                return -ENOTSUPP;

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        /* TODO: proper handling of user priority is still needed; for now,
         * fail the command if there is a valid priority
         */
        if (vlan_id > 4095 || qos)
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        vlan_tag = vlan_id;
        if (vlan_tag == vf->vlan)
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                vf->vlan = vlan_tag;
        return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                   int max_tx_rate)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u32 pf_link_speed;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        if (max_tx_rate > pf_link_speed) {
                netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
                            max_tx_rate, vf_id);
                return -EINVAL;
        }

        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                            min_tx_rate, vf_id);
                return -EINVAL;
        }
        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
        req.min_bw = cpu_to_le32(min_tx_rate);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->min_tx_rate = min_tx_rate;
                vf->max_tx_rate = max_tx_rate;
        }
        return rc;
}

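/* .ndo_set_vf_link_state handler: record the requested state in vf->flags
 * and notify the VF with a forwarded link status change async event.
 */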
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];

        vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
        switch (link) {
        case IFLA_VF_LINK_STATE_AUTO:
                vf->flags |= BNXT_VF_LINK_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->flags |= BNXT_VF_LINK_FORCED;
                break;
        case IFLA_VF_LINK_STATE_ENABLE:
                vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
                break;
        default:
                netdev_err(bp->dev, "Invalid link option\n");
                rc = -EINVAL;
                break;
        }
        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
                        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
        return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
        int i;
        struct bnxt_vf_info *vf;

        for (i = 0; i < num_vfs; i++) {
                vf = &bp->pf.vf[i];
                memset(vf, 0, sizeof(*vf));
        }
        return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
        int i, rc = 0;
        struct bnxt_pf_info *pf = &bp->pf;
        struct hwrm_func_vf_resc_free_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
                req.vf_id = cpu_to_le16(i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
        struct pci_dev *pdev = bp->pdev;
        int i;

        kfree(bp->pf.vf_event_bmap);
        bp->pf.vf_event_bmap = NULL;

        for (i = 0; i < 4; i++) {
                if (bp->pf.hwrm_cmd_req_addr[i]) {
                        dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                          bp->pf.hwrm_cmd_req_addr[i],
                                          bp->pf.hwrm_cmd_req_dma_addr[i]);
                        bp->pf.hwrm_cmd_req_addr[i] = NULL;
                }
        }

        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
}

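/* Allocate per-VF state plus the DMA-coherent pages that receive HWRM
 * requests forwarded from the VFs: each VF gets a BNXT_HWRM_REQ_MAX_SIZE
 * slot, with BNXT_HWRM_REQS_PER_PAGE slots per page and at most 4 pages.
 */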
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
        struct pci_dev *pdev = bp->pdev;
        u32 nr_pages, size, i, j, k = 0;

        bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
        if (!bp->pf.vf)
                return -ENOMEM;

        bnxt_set_vf_attr(bp, num_vfs);

        size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
        nr_pages = size / BNXT_PAGE_SIZE;
        if (size & (BNXT_PAGE_SIZE - 1))
                nr_pages++;

        for (i = 0; i < nr_pages; i++) {
                bp->pf.hwrm_cmd_req_addr[i] =
                        dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                           &bp->pf.hwrm_cmd_req_dma_addr[i],
                                           GFP_KERNEL);

                if (!bp->pf.hwrm_cmd_req_addr[i])
                        return -ENOMEM;

                for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
                        struct bnxt_vf_info *vf = &bp->pf.vf[k];

                        vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                                j * BNXT_HWRM_REQ_MAX_SIZE;
                        vf->hwrm_cmd_req_dma_addr =
                                bp->pf.hwrm_cmd_req_dma_addr[i] +
                                j * BNXT_HWRM_REQ_MAX_SIZE;
                        k++;
                }
        }

        /* Max 128 VFs: 16 bytes give a 128-bit event bitmap */
        bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
        if (!bp->pf.vf_event_bmap)
                return -ENOMEM;

        bp->pf.hwrm_cmd_req_pages = nr_pages;
        return 0;
}

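/* Register the request buffer pages with firmware so that HWRM commands
 * issued by VFs are placed into the PF's buffers for validation and
 * forwarding.
 */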
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        struct hwrm_func_buf_rgtr_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

        req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
        req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
        req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
        req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
        req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
        req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
        req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_vf_info *vf;

        vf = &bp->pf.vf[vf_id];
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);

        if (is_valid_ether_addr(vf->mac_addr)) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
                memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
        }
        if (vf->vlan) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
                req.dflt_vlan = cpu_to_le16(vf->vlan);
        }
        if (vf->max_tx_rate) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
                req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
                req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
        }
        if (vf->flags & BNXT_VF_TRUST)
                req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

        _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        struct hwrm_func_vf_resource_cfg_input req = {0};
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
        u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
        struct bnxt_pf_info *pf = &bp->pf;
        int i, rc = 0, min = 1;
        u16 vf_msix = 0;
        u16 vf_rss;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
                vf_ring_grps = 0;
        } else {
                vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
        }
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
        else
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
        vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
        vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
        vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

        req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                min = 0;
                req.min_rsscos_ctx = cpu_to_le16(min);
        }
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
            pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                req.min_cmpl_rings = cpu_to_le16(min);
                req.min_tx_rings = cpu_to_le16(min);
                req.min_rx_rings = cpu_to_le16(min);
                req.min_l2_ctxs = cpu_to_le16(min);
                req.min_vnics = cpu_to_le16(min);
                req.min_stat_ctx = cpu_to_le16(min);
                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
                        req.min_hw_ring_grps = cpu_to_le16(min);
        } else {
                vf_cp_rings /= num_vfs;
                vf_tx_rings /= num_vfs;
                vf_rx_rings /= num_vfs;
                vf_vnics /= num_vfs;
                vf_stat_ctx /= num_vfs;
                vf_ring_grps /= num_vfs;
                vf_rss /= num_vfs;

                req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
                req.min_tx_rings = cpu_to_le16(vf_tx_rings);
                req.min_rx_rings = cpu_to_le16(vf_rx_rings);
                req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
                req.min_vnics = cpu_to_le16(vf_vnics);
                req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
                req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
                req.min_rsscos_ctx = cpu_to_le16(vf_rss);
        }
        req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.max_tx_rings = cpu_to_le16(vf_tx_rings);
        req.max_rx_rings = cpu_to_le16(vf_rx_rings);
        req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
        req.max_vnics = cpu_to_le16(vf_vnics);
        req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
        req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.max_rsscos_ctx = cpu_to_le16(vf_rss);
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                req.max_msix = cpu_to_le16(vf_msix / num_vfs);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                if (reset)
                        __bnxt_set_vf_params(bp, i);

                req.vf_id = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = pf->first_vf_id + i;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                u16 n = pf->active_vfs;

                hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
                hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
                hw_resc->max_hw_ring_grps -=
                        le16_to_cpu(req.min_hw_ring_grps) * n;
                hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
                hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
                hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
                hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        hw_resc->max_irqs -= vf_msix * n;

                rc = pf->active_vfs;
        }
        return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
        u32 rc = 0, mtu, i;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;
        int total_vf_tx_rings = 0;
        u16 vf_ring_grps;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

        /* Remaining rings are distributed equally among VFs for now */
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
                              num_vfs;
        else
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
                              num_vfs;
        vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
                                  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

        mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        req.mru = cpu_to_le16(mtu);
        req.mtu = cpu_to_le16(mtu);

        req.num_rsscos_ctxs = cpu_to_le16(1);
        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.num_tx_rings = cpu_to_le16(vf_tx_rings);
        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.num_l2_ctxs = cpu_to_le16(4);

        req.num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
        req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                int vf_tx_rsvd = vf_tx_rings;

                req.fid = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
                rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
                                              &vf_tx_rsvd);
                if (rc)
                        break;
                total_vf_tx_rings += vf_tx_rsvd;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                hw_resc->max_tx_rings -= total_vf_tx_rings;
                hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
                hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
                hw_resc->max_rsscos_ctxs -= num_vfs;
                hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
                hw_resc->max_vnics -= vf_vnics * num_vfs;
                rc = pf->active_vfs;
        }
        return rc;
}

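/* Dispatch to the resource-manager flow (HWRM_FUNC_VF_RESOURCE_CFG) on
 * newer firmware, or to the legacy fixed-partitioning flow (HWRM_FUNC_CFG)
 * otherwise.
 */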
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        if (BNXT_NEW_RM(bp))
                return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
        else
                return bnxt_hwrm_func_cfg(bp, num_vfs);
}

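/* Register VF request buffers and reserve HW resources.  On partial
 * success, *num_vfs is reduced to the number of VFs actually provisioned.
 */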
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        int rc;

        /* Register buffers for VFs */
        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                return rc;

        /* Reserve resources for VFs */
        rc = bnxt_func_cfg(bp, *num_vfs, reset);
        if (rc != *num_vfs) {
                if (rc <= 0) {
                        netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
                        *num_vfs = 0;
                        return rc;
                }
                netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
                            rc);
                *num_vfs = rc;
        }

        bnxt_ulp_sriov_cfg(bp, *num_vfs);
        return 0;
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
        int avail_cp, avail_stat;

        /* Check if we can enable the requested number of VFs.  At a minimum
         * we require 1 RX and 1 TX ring for each VF.  In this minimal
         * configuration, features like TPA will not be available.
         */
        vfs_supported = *num_vfs;

        avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
        avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
        avail_cp = min_t(int, avail_cp, avail_stat);

        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
                min_rss_ctxs = vfs_supported;

                if (bp->flags & BNXT_FLAG_AGG_RINGS) {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
                            min_rx_rings)
                                rx_ok = 1;
                } else {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
                            min_rx_rings)
                                rx_ok = 1;
                }
                if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
                    avail_cp < min_rx_rings)
                        rx_ok = 0;

                if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
                    avail_cp >= min_tx_rings)
                        tx_ok = 1;

                if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
                    min_rss_ctxs)
                        rss_ok = 1;

                if (tx_ok && rx_ok && rss_ok)
                        break;

                vfs_supported--;
        }

        if (!vfs_supported) {
                netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
                return -EINVAL;
        }

        if (vfs_supported != *num_vfs) {
                netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
                            *num_vfs, vfs_supported);
                *num_vfs = vfs_supported;
        }

        rc = bnxt_alloc_vf_resources(bp, *num_vfs);
        if (rc)
                goto err_out1;

        rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
        if (rc)
                goto err_out2;

        rc = pci_enable_sriov(bp->pdev, *num_vfs);
        if (rc)
                goto err_out2;

        return 0;

err_out2:
        /* Free the resources reserved for various VFs */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
        bnxt_free_vf_resources(bp);

        return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);

        if (!num_vfs)
                return;

        /* synchronize VF and VF-rep create and destroy */
        mutex_lock(&bp->sriov_lock);
        bnxt_vf_reps_destroy(bp);

        if (pci_vfs_assigned(bp->pdev)) {
                bnxt_hwrm_fwd_async_event_cmpl(
                        bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
                pci_disable_sriov(bp->pdev);
                /* Free the HW resources reserved for various VFs */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }
        mutex_unlock(&bp->sriov_lock);

        bnxt_free_vf_resources(bp);

        bp->pf.active_vfs = 0;
        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
        rtnl_unlock();

        bnxt_ulp_sriov_cfg(bp, 0);
}

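/* PCI sriov_configure entry point (reached via the sriov_numvfs sysfs
 * attribute).  Serializes against device state and firmware reset, tears
 * down any existing VFs, then enables the requested set.
 */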
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(dev);

        if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
                netdev_warn(dev, "SRIOV is not allowed when the irq mode is not MSI-X\n");
                return 0;
        }

        rtnl_lock();
        if (!netif_running(dev)) {
                netdev_warn(dev, "Reject SRIOV config request since the interface is down!\n");
                rtnl_unlock();
                return 0;
        }
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
                rtnl_unlock();
                return 0;
        }
        bp->sriov_cfg = true;
        rtnl_unlock();

        if (pci_vfs_assigned(bp->pdev)) {
                netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
                num_vfs = 0;
                goto sriov_cfg_exit;
        }

        /* Check if the number of enabled VFs is the same as requested */
        if (num_vfs && num_vfs == bp->pf.active_vfs)
                goto sriov_cfg_exit;

        /* if there are previously existing VFs, clean them up */
        bnxt_sriov_disable(bp);
        if (!num_vfs)
                goto sriov_cfg_exit;

        bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
        bp->sriov_cfg = false;
        wake_up(&bp->sriov_cfg_wait);

        return num_vfs;
}

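/* The next three helpers implement the PF side of the HWRM forwarding
 * channel: fwd_resp returns a PF-crafted response to the VF, fwd_err_resp
 * rejects the VF's request, and exec_fwd_resp asks firmware to execute it
 * on the VF's behalf.
 */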
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                              void *encap_resp, __le64 encap_resp_addr,
                              __le16 encap_resp_cpr, u32 msg_size)
{
        int rc = 0;
        struct hwrm_fwd_resp_input req = {0};

        if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_len = cpu_to_le16(msg_size);
        req.encap_resp_addr = encap_resp_addr;
        req.encap_resp_cmpl_ring = encap_resp_cpr;
        memcpy(req.encap_resp, encap_resp, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                  u32 msg_size)
{
        int rc = 0;
        struct hwrm_reject_fwd_resp_input req = {0};

        if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                   u32 msg_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {0};

        if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
        struct hwrm_func_vf_cfg_input *req =
                (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

        /* Allow the VF to set a valid MAC address if trust is on or
         * if the PF-assigned MAC address is zero
         */
        if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
                bool trust = bnxt_is_trusted_vf(bp, vf);

                if (is_valid_ether_addr(req->dflt_mac_addr) &&
                    (trust || !is_valid_ether_addr(vf->mac_addr) ||
                     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
                        ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
                        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
                }
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
        }
        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
        struct hwrm_cfa_l2_filter_alloc_input *req =
                (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
        bool mac_ok = false;

        if (!is_valid_ether_addr((const u8 *)req->l2_addr))
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

        /* Allow the VF to set a valid MAC address if trust is on.
         * Otherwise, the VF MAC address must first match the MAC address in
         * the PF's context, or it must match the VF MAC address if firmware
         * spec >= 1.2.2
         */
        if (bnxt_is_trusted_vf(bp, vf)) {
                mac_ok = true;
        } else if (is_valid_ether_addr(vf->mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
                        mac_ok = true;
        } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
                        mac_ok = true;
        } else {
                /* There are two cases:
                 * 1. If firmware spec < 0x10202, the VF MAC address is not
                 *    forwarded to the PF and so it doesn't have to match
                 * 2. Allow the VF to modify its own MAC when the PF has not
                 *    assigned a valid MAC address and firmware spec >= 0x10202
                 */
                mac_ok = true;
        }
        if (mac_ok)
                return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;

        if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
                /* real link */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
        } else {
                struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
                struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

                phy_qcfg_req =
                        (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
                mutex_lock(&bp->hwrm_cmd_lock);
                memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
                       sizeof(phy_qcfg_resp));
                mutex_unlock(&bp->hwrm_cmd_lock);
                phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
                phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
                phy_qcfg_resp.valid = 1;

                if (vf->flags & BNXT_VF_LINK_UP) {
                        /* if physical link is down, force link up on VF */
                        if (phy_qcfg_resp.link !=
                            PORT_PHY_QCFG_RESP_LINK_LINK) {
                                phy_qcfg_resp.link =
                                        PORT_PHY_QCFG_RESP_LINK_LINK;
                                phy_qcfg_resp.link_speed = cpu_to_le16(
                                        PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
                                phy_qcfg_resp.duplex_cfg =
                                        PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
                                phy_qcfg_resp.duplex_state =
                                        PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
                                phy_qcfg_resp.pause =
                                        (PORT_PHY_QCFG_RESP_PAUSE_TX |
                                         PORT_PHY_QCFG_RESP_PAUSE_RX);
                        }
                } else {
                        /* force link down */
                        phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
                        phy_qcfg_resp.link_speed = 0;
                        phy_qcfg_resp.duplex_state =
                                PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
                        phy_qcfg_resp.pause = 0;
                }
                rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
                                        phy_qcfg_req->resp_addr,
                                        phy_qcfg_req->cmpl_ring,
                                        sizeof(phy_qcfg_resp));
        }
        return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;
        struct input *encap_req = vf->hwrm_cmd_req_addr;
        u32 req_type = le16_to_cpu(encap_req->req_type);

        switch (req_type) {
        case HWRM_FUNC_VF_CFG:
                rc = bnxt_vf_configure_mac(bp, vf);
                break;
        case HWRM_CFA_L2_FILTER_ALLOC:
                rc = bnxt_vf_validate_set_mac(bp, vf);
                break;
        case HWRM_FUNC_CFG:
                /* TODO: validate if the VF is allowed to change mac address,
                 * mtu, num of rings etc
                 */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_func_cfg_input));
                break;
        case HWRM_PORT_PHY_QCFG:
                rc = bnxt_vf_set_link(bp, vf);
                break;
        default:
                break;
        }
        return rc;
}

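/* Invoked when firmware signals that one or more VFs have forwarded HWRM
 * requests: walk the VF event bitmap and validate/forward each pending
 * request.
 */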
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

        /* Scan through VFs and process commands */
        while (1) {
                vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
                if (vf_id >= active_vfs)
                        break;

                clear_bit(vf_id, bp->pf.vf_event_bmap);
                bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
                i = vf_id + 1;
        }
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
        req.fid = cpu_to_le16(0xffff);

        mutex_lock(&bp->hwrm_cmd_lock);
        if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
                goto update_vf_mac_exit;

        /* Store the MAC address from the firmware. There are 2 cases:
         * 1. MAC address is valid. It is assigned from the PF and we
         *    need to override the current VF MAC address with it.
         * 2. MAC address is zero. The VF will use a random MAC address by
         *    default, but the stored zero MAC will allow the VF user to
         *    change the random MAC address using ndo_set_mac_address()
         *    if desired.
         */
        if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
                memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

        /* overwrite netdev dev_addr with admin VF MAC */
        if (is_valid_ether_addr(bp->vf.mac_addr))
                memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;

        if (!BNXT_VF(bp))
                return 0;

        if (bp->hwrm_spec_code < 0x10202) {
                if (is_valid_ether_addr(bp->vf.mac_addr))
                        rc = -EADDRNOTAVAIL;
                goto mac_done;
        }
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
        req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
        if (rc && strict) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
                return rc;
        }
        return 0;
}
#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        if (*num_vfs)
                return -EOPNOTSUPP;
        return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        return 0;
}
#endif