/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                          struct bnxt_vf_info *vf, u16 event_id)
{
        struct hwrm_fwd_async_event_cmpl_input req = {0};
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
        if (vf)
                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
                /* broadcast this async event to all VFs */
                req.encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
                           rc);
        return rc;
}

static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                netdev_err(bp->dev, "vf ndo called though PF is down\n");
                return -EINVAL;
        }
        if (!bp->pf.active_vfs) {
                netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
                return -EINVAL;
        }
        if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
        return 0;
}

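/* .ndo_set_vf_spoofchk handler: toggles firmware source MAC address
 * checking for the given VF via HWRM_FUNC_CFG.
 */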
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        bool old_setting = false;
        u32 func_flags;
        int rc = 0;

        if (bp->hwrm_spec_code < 0x10701)
                return -ENOTSUPP;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        if (vf->flags & BNXT_VF_SPOOFCHK)
                old_setting = true;
        if (old_setting == setting)
                return 0;

        func_flags = vf->func_flags;
        if (setting)
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /*TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->func_flags = func_flags;
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
                        vf->flags &= ~BNXT_VF_SPOOFCHK;
        }
        return rc;
}

static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_qcfg_input req = {0};
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
        }
        vf->func_qcfg_flags = le16_to_cpu(resp->flags);
        mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
}

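/* Returns the VF trust setting, preferring the firmware-maintained flag
 * when the firmware advertises trusted VF support.
 */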
static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return !!(vf->flags & BNXT_VF_TRUST);

        bnxt_hwrm_func_qcfg_flags(bp, vf);
        return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_cfg_input req = {0};

        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        if (vf->flags & BNXT_VF_TRUST)
                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
        else
                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;

        if (bnxt_vf_ndo_prep(bp, vf_id))
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        if (trusted)
                vf->flags |= BNXT_VF_TRUST;
        else
                vf->flags &= ~BNXT_VF_TRUST;

        bnxt_hwrm_set_trusted_vf(bp, vf);
        return 0;
}

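/* .ndo_get_vf_config handler: reports the VF MAC, VLAN, rates, spoofchk,
 * trust and link state settings cached in the PF.
 */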
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
                       struct ifla_vf_info *ivi)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        ivi->vf = vf_id;
        vf = &bp->pf.vf[vf_id];

        if (is_valid_ether_addr(vf->mac_addr))
                memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
        else
                memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
        ivi->max_tx_rate = vf->max_tx_rate;
        ivi->min_tx_rate = vf->min_tx_rate;
        ivi->vlan = vf->vlan;
        if (vf->flags & BNXT_VF_QOS)
                ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
        else
                ivi->qos = 0;
        ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
        ivi->trusted = bnxt_is_trusted_vf(bp, vf);
        if (!(vf->flags & BNXT_VF_LINK_FORCED))
                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
        else if (vf->flags & BNXT_VF_LINK_UP)
                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
        else
                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

        return 0;
}

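/* .ndo_set_vf_mac handler: stores the administratively assigned MAC and
 * programs it as the VF's default MAC address in firmware.
 */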
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
        /* reject bc or mc mac addr, zero mac addr means allow
         * VF to use its own mac addr
         */
        if (is_multicast_ether_addr(mac)) {
                netdev_err(dev, "Invalid VF ethernet address\n");
                return -EINVAL;
        }
        vf = &bp->pf.vf[vf_id];

        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

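/* .ndo_set_vf_vlan handler: programs a default (port) VLAN for the VF.
 * Only 802.1Q without a priority value is supported.
 */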
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
                     __be16 vlan_proto)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u16 vlan_tag;
        int rc;

        if (bp->hwrm_spec_code < 0x10201)
                return -ENOTSUPP;

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        /* TODO: needed to implement proper handling of user priority,
         * currently fail the command if there is valid priority
         */
        if (vlan_id > 4095 || qos)
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        vlan_tag = vlan_id;
        if (vlan_tag == vf->vlan)
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                vf->vlan = vlan_tag;
        return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                   int max_tx_rate)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u32 pf_link_speed;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        if (max_tx_rate > pf_link_speed) {
                netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
                            max_tx_rate, vf_id);
                return -EINVAL;
        }

        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                            min_tx_rate, vf_id);
                return -EINVAL;
        }
        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
        req.min_bw = cpu_to_le32(min_tx_rate);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->min_tx_rate = min_tx_rate;
                vf->max_tx_rate = max_tx_rate;
        }
        return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];

        vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
        switch (link) {
        case IFLA_VF_LINK_STATE_AUTO:
                vf->flags |= BNXT_VF_LINK_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->flags |= BNXT_VF_LINK_FORCED;
                break;
        case IFLA_VF_LINK_STATE_ENABLE:
                vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
                break;
        default:
                netdev_err(bp->dev, "Invalid link option\n");
                rc = -EINVAL;
                break;
        }
        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
                        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
        return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
        int i;
        struct bnxt_vf_info *vf;

        for (i = 0; i < num_vfs; i++) {
                vf = &bp->pf.vf[i];
                memset(vf, 0, sizeof(*vf));
        }
        return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
        int i, rc = 0;
        struct bnxt_pf_info *pf = &bp->pf;
        struct hwrm_func_vf_resc_free_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
                req.vf_id = cpu_to_le16(i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

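/* Free the VF event bitmap, the VF HWRM request buffer pages and the
 * per-VF info array allocated by bnxt_alloc_vf_resources().
 */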
static void bnxt_free_vf_resources(struct bnxt *bp)
{
        struct pci_dev *pdev = bp->pdev;
        int i;

        kfree(bp->pf.vf_event_bmap);
        bp->pf.vf_event_bmap = NULL;

        for (i = 0; i < 4; i++) {
                if (bp->pf.hwrm_cmd_req_addr[i]) {
                        dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                          bp->pf.hwrm_cmd_req_addr[i],
                                          bp->pf.hwrm_cmd_req_dma_addr[i]);
                        bp->pf.hwrm_cmd_req_addr[i] = NULL;
                }
        }

        bp->pf.active_vfs = 0;
        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
}

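/* Allocate the per-VF info array and DMA-coherent pages used to receive
 * forwarded HWRM requests from the VFs, and carve a request slot out of
 * those pages for each VF.
 */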
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
        struct pci_dev *pdev = bp->pdev;
        u32 nr_pages, size, i, j, k = 0;

        bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
        if (!bp->pf.vf)
                return -ENOMEM;

        bnxt_set_vf_attr(bp, num_vfs);

        size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
        nr_pages = size / BNXT_PAGE_SIZE;
        if (size & (BNXT_PAGE_SIZE - 1))
                nr_pages++;

        for (i = 0; i < nr_pages; i++) {
                bp->pf.hwrm_cmd_req_addr[i] =
                        dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                           &bp->pf.hwrm_cmd_req_dma_addr[i],
                                           GFP_KERNEL);

                if (!bp->pf.hwrm_cmd_req_addr[i])
                        return -ENOMEM;

                for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
                        struct bnxt_vf_info *vf = &bp->pf.vf[k];

                        vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                                j * BNXT_HWRM_REQ_MAX_SIZE;
                        vf->hwrm_cmd_req_dma_addr =
                                bp->pf.hwrm_cmd_req_dma_addr[i] + j *
                                BNXT_HWRM_REQ_MAX_SIZE;
                        k++;
                }
        }

        bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
        if (!bp->pf.vf_event_bmap)
                return -ENOMEM;

        bp->pf.hwrm_cmd_req_pages = nr_pages;
        return 0;
}

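/* Register the VF HWRM request buffer pages with the firmware so that VF
 * commands can be forwarded to the PF driver.
 */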
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        struct hwrm_func_buf_rgtr_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

        req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
        req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
        req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
        req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
        req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
        req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
        req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_vf_info *vf;

        vf = &bp->pf.vf[vf_id];
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);

        if (is_valid_ether_addr(vf->mac_addr)) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
                memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
        }
        if (vf->vlan) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
                req.dflt_vlan = cpu_to_le16(vf->vlan);
        }
        if (vf->max_tx_rate) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
                req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
                req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
        }
        if (vf->flags & BNXT_VF_TRUST)
                req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

        _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        struct hwrm_func_vf_resource_cfg_input req = {0};
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
        u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
        struct bnxt_pf_info *pf = &bp->pf;
        int i, rc = 0, min = 1;
        u16 vf_msix = 0;
        u16 vf_rss;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
                vf_ring_grps = 0;
        } else {
                vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
        }
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
        else
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
        vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
        vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
        vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

        req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                min = 0;
                req.min_rsscos_ctx = cpu_to_le16(min);
        }
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
            pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                req.min_cmpl_rings = cpu_to_le16(min);
                req.min_tx_rings = cpu_to_le16(min);
                req.min_rx_rings = cpu_to_le16(min);
                req.min_l2_ctxs = cpu_to_le16(min);
                req.min_vnics = cpu_to_le16(min);
                req.min_stat_ctx = cpu_to_le16(min);
                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
                        req.min_hw_ring_grps = cpu_to_le16(min);
        } else {
                vf_cp_rings /= num_vfs;
                vf_tx_rings /= num_vfs;
                vf_rx_rings /= num_vfs;
                vf_vnics /= num_vfs;
                vf_stat_ctx /= num_vfs;
                vf_ring_grps /= num_vfs;
                vf_rss /= num_vfs;

                req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
                req.min_tx_rings = cpu_to_le16(vf_tx_rings);
                req.min_rx_rings = cpu_to_le16(vf_rx_rings);
                req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
                req.min_vnics = cpu_to_le16(vf_vnics);
                req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
                req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
                req.min_rsscos_ctx = cpu_to_le16(vf_rss);
        }
        req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.max_tx_rings = cpu_to_le16(vf_tx_rings);
        req.max_rx_rings = cpu_to_le16(vf_rx_rings);
        req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
        req.max_vnics = cpu_to_le16(vf_vnics);
        req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
        req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.max_rsscos_ctx = cpu_to_le16(vf_rss);
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                req.max_msix = cpu_to_le16(vf_msix / num_vfs);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                if (reset)
                        __bnxt_set_vf_params(bp, i);

                req.vf_id = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = pf->first_vf_id + i;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                u16 n = pf->active_vfs;

                hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
                hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
                hw_resc->max_hw_ring_grps -=
                        le16_to_cpu(req.min_hw_ring_grps) * n;
                hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
                hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
                hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
                hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        hw_resc->max_irqs -= vf_msix * n;

                rc = pf->active_vfs;
        }
        return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
        u32 rc = 0, mtu, i;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;
        int total_vf_tx_rings = 0;
        u16 vf_ring_grps;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

        /* Remaining rings are distributed equally amongst VF's for now */
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
                              num_vfs;
        else
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
                              num_vfs;
        vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
                                  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

        mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        req.mru = cpu_to_le16(mtu);
        req.mtu = cpu_to_le16(mtu);

        req.num_rsscos_ctxs = cpu_to_le16(1);
        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.num_tx_rings = cpu_to_le16(vf_tx_rings);
        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.num_l2_ctxs = cpu_to_le16(4);

        req.num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
        req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                int vf_tx_rsvd = vf_tx_rings;

                req.fid = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
                rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
                                              &vf_tx_rsvd);
                if (rc)
                        break;
                total_vf_tx_rings += vf_tx_rsvd;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                hw_resc->max_tx_rings -= total_vf_tx_rings;
                hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
                hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
                hw_resc->max_rsscos_ctxs -= num_vfs;
                hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
                hw_resc->max_vnics -= vf_vnics * num_vfs;
                rc = pf->active_vfs;
        }
        return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        if (BNXT_NEW_RM(bp))
                return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
        else
                return bnxt_hwrm_func_cfg(bp, num_vfs);
}

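/* Register VF request buffers and reserve hardware resources for *num_vfs
 * VFs.  On partial success, *num_vfs is reduced to the number of VFs that
 * could actually be provisioned.
 */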
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        int rc;

        /* Register buffers for VFs */
        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                return rc;

        /* Reserve resources for VFs */
        rc = bnxt_func_cfg(bp, *num_vfs, reset);
        if (rc != *num_vfs) {
                if (rc <= 0) {
                        netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
                        *num_vfs = 0;
                        return rc;
                }
                netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
                            rc);
                *num_vfs = rc;
        }

        bnxt_ulp_sriov_cfg(bp, *num_vfs);
        return 0;
}

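/* Enable SR-IOV: determine how many VFs the remaining PF resources can
 * support, allocate and reserve resources for them, then enable the VFs
 * in the PCI core.
 */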
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
        int avail_cp, avail_stat;

        /* Check if we can enable requested num of vf's. At a minimum
         * we require 1 RX 1 TX rings for each VF. In this minimum conf
         * features like TPA will not be available.
         */
        vfs_supported = *num_vfs;

        avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
        avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
        avail_cp = min_t(int, avail_cp, avail_stat);

        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
                min_rss_ctxs = vfs_supported;

                if (bp->flags & BNXT_FLAG_AGG_RINGS) {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
                            min_rx_rings)
                                rx_ok = 1;
                } else {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
                            min_rx_rings)
                                rx_ok = 1;
                }
                if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
                    avail_cp < min_rx_rings)
                        rx_ok = 0;

                if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
                    avail_cp >= min_tx_rings)
                        tx_ok = 1;

                if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
                    min_rss_ctxs)
                        rss_ok = 1;

                if (tx_ok && rx_ok && rss_ok)
                        break;

                vfs_supported--;
        }

        if (!vfs_supported) {
                netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
                return -EINVAL;
        }

        if (vfs_supported != *num_vfs) {
                netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
                            *num_vfs, vfs_supported);
                *num_vfs = vfs_supported;
        }

        rc = bnxt_alloc_vf_resources(bp, *num_vfs);
        if (rc)
                goto err_out1;

        rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
        if (rc)
                goto err_out2;

        rc = pci_enable_sriov(bp->pdev, *num_vfs);
        if (rc)
                goto err_out2;

        return 0;

err_out2:
        /* Free the resources reserved for various VF's */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
        bnxt_free_vf_resources(bp);

        return rc;
}

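/* Tear down SR-IOV: destroy VF representors, disable the VFs (unless they
 * are assigned to VMs), release their resources and restore the PF's
 * firmware resources.
 */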
void bnxt_sriov_disable(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);

        if (!num_vfs)
                return;

        /* synchronize VF and VF-rep create and destroy */
        mutex_lock(&bp->sriov_lock);
        bnxt_vf_reps_destroy(bp);

        if (pci_vfs_assigned(bp->pdev)) {
                bnxt_hwrm_fwd_async_event_cmpl(
                        bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
                pci_disable_sriov(bp->pdev);
                /* Free the HW resources reserved for various VF's */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }
        mutex_unlock(&bp->sriov_lock);

        bnxt_free_vf_resources(bp);

        bp->pf.active_vfs = 0;
        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
        rtnl_unlock();

        bnxt_ulp_sriov_cfg(bp, 0);
}

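/* .sriov_configure handler invoked from the PCI sysfs interface. */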
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(dev);

        if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
                netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
                return 0;
        }

        rtnl_lock();
        if (!netif_running(dev)) {
                netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
                rtnl_unlock();
                return 0;
        }
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
                rtnl_unlock();
                return 0;
        }
        bp->sriov_cfg = true;
        rtnl_unlock();

        if (pci_vfs_assigned(bp->pdev)) {
                netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
                num_vfs = 0;
                goto sriov_cfg_exit;
        }

        /* Check if enabled VFs is same as requested */
        if (num_vfs && num_vfs == bp->pf.active_vfs)
                goto sriov_cfg_exit;

        /* if there are previous existing VFs, clean them up */
        bnxt_sriov_disable(bp);
        if (!num_vfs)
                goto sriov_cfg_exit;

        bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
        bp->sriov_cfg = false;
        wake_up(&bp->sriov_cfg_wait);

        return num_vfs;
}

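/* Forward a locally built response to a VF for a command that the PF
 * processed on the VF's behalf.
 */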
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                              void *encap_resp, __le64 encap_resp_addr,
                              __le16 encap_resp_cpr, u32 msg_size)
{
        int rc = 0;
        struct hwrm_fwd_resp_input req = {0};

        if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_len = cpu_to_le16(msg_size);
        req.encap_resp_addr = encap_resp_addr;
        req.encap_resp_cmpl_ring = encap_resp_cpr;
        memcpy(req.encap_resp, encap_resp, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                  u32 msg_size)
{
        int rc = 0;
        struct hwrm_reject_fwd_resp_input req = {0};

        if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                   u32 msg_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {0};

        if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
        return rc;
}

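/* Validate a forwarded HWRM_FUNC_VF_CFG request: the VF may only change
 * its default MAC address if it is trusted or the PF has not assigned one.
 */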
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
        struct hwrm_func_vf_cfg_input *req =
                (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

        /* Allow VF to set a valid MAC address, if trust is set to on or
         * if the PF assigned MAC address is zero
         */
        if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
                bool trust = bnxt_is_trusted_vf(bp, vf);

                if (is_valid_ether_addr(req->dflt_mac_addr) &&
                    (trust || !is_valid_ether_addr(vf->mac_addr) ||
                     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
                        ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
                        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
                }
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
        }
        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

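/* Validate the MAC address in a forwarded HWRM_CFA_L2_FILTER_ALLOC
 * request before executing it on behalf of the VF.
 */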
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
        struct hwrm_cfa_l2_filter_alloc_input *req =
                (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
        bool mac_ok = false;

        if (!is_valid_ether_addr((const u8 *)req->l2_addr))
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

        /* Allow VF to set a valid MAC address, if trust is set to on.
         * Or VF MAC address must first match MAC address in PF's context.
         * Otherwise, it must match the VF MAC address if firmware spec >=
         * 1.2.2
         */
        if (bnxt_is_trusted_vf(bp, vf)) {
                mac_ok = true;
        } else if (is_valid_ether_addr(vf->mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
                        mac_ok = true;
        } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
                        mac_ok = true;
        } else {
                /* There are two cases:
                 * 1.If firmware spec < 0x10202,VF MAC address is not forwarded
                 *   to the PF and so it doesn't have to match
                 * 2.Allow VF to modify its own MAC when PF has not assigned a
                 *   valid MAC address and firmware spec >= 0x10202
                 */
                mac_ok = true;
        }
        if (mac_ok)
                return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;

        if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
                /* real link */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
        } else {
                struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
                struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

                phy_qcfg_req =
                (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
                mutex_lock(&bp->hwrm_cmd_lock);
                memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
                       sizeof(phy_qcfg_resp));
                mutex_unlock(&bp->hwrm_cmd_lock);
                phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
                phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
                phy_qcfg_resp.valid = 1;

                if (vf->flags & BNXT_VF_LINK_UP) {
                        /* if physical link is down, force link up on VF */
                        if (phy_qcfg_resp.link !=
                            PORT_PHY_QCFG_RESP_LINK_LINK) {
                                phy_qcfg_resp.link =
                                        PORT_PHY_QCFG_RESP_LINK_LINK;
                                phy_qcfg_resp.link_speed = cpu_to_le16(
                                        PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
                                phy_qcfg_resp.duplex_cfg =
                                        PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
                                phy_qcfg_resp.duplex_state =
                                        PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
                                phy_qcfg_resp.pause =
                                        (PORT_PHY_QCFG_RESP_PAUSE_TX |
                                         PORT_PHY_QCFG_RESP_PAUSE_RX);
                        }
                } else {
                        /* force link down */
                        phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
                        phy_qcfg_resp.link_speed = 0;
                        phy_qcfg_resp.duplex_state =
                                PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
                        phy_qcfg_resp.pause = 0;
                }
                rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
                                        phy_qcfg_req->resp_addr,
                                        phy_qcfg_req->cmpl_ring,
                                        sizeof(phy_qcfg_resp));
        }
        return rc;
}

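/* Dispatch a forwarded VF HWRM request to the appropriate validation
 * handler based on the request type.
 */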
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;
        struct input *encap_req = vf->hwrm_cmd_req_addr;
        u32 req_type = le16_to_cpu(encap_req->req_type);

        switch (req_type) {
        case HWRM_FUNC_VF_CFG:
                rc = bnxt_vf_configure_mac(bp, vf);
                break;
        case HWRM_CFA_L2_FILTER_ALLOC:
                rc = bnxt_vf_validate_set_mac(bp, vf);
                break;
        case HWRM_FUNC_CFG:
                /* TODO Validate if VF is allowed to change mac address,
                 * mtu, num of rings etc
                 */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_func_cfg_input));
                break;
        case HWRM_PORT_PHY_QCFG:
                rc = bnxt_vf_set_link(bp, vf);
                break;
        default:
                break;
        }
        return rc;
}

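/* Process all pending forwarded VF requests flagged in the VF event
 * bitmap.
 */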
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

        /* Scan through VF's and process commands */
        while (1) {
                vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
                if (vf_id >= active_vfs)
                        break;

                clear_bit(vf_id, bp->pf.vf_event_bmap);
                bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
                i = vf_id + 1;
        }
}

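/* Called on the VF to refresh the PF-assigned MAC address from firmware
 * and apply it to the netdev if it is valid.
 */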
void bnxt_update_vf_mac(struct bnxt *bp)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
        req.fid = cpu_to_le16(0xffff);

        mutex_lock(&bp->hwrm_cmd_lock);
        if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
                goto update_vf_mac_exit;

        /* Store MAC address from the firmware.  There are 2 cases:
         * 1. MAC address is valid.  It is assigned from the PF and we
         *    need to override the current VF MAC address with it.
         * 2. MAC address is zero.  The VF will use a random MAC address by
         *    default but the stored zero MAC will allow the VF user to change
         *    the random MAC address using ndo_set_mac_address() if he wants.
         */
        if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
                memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

        /* overwrite netdev dev_addr with admin VF MAC */
        if (is_valid_ether_addr(bp->vf.mac_addr))
                memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
}

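/* Called on the VF to ask the PF to approve a MAC address change via
 * HWRM_FUNC_VF_CFG.
 */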
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;

        if (!BNXT_VF(bp))
                return 0;

        if (bp->hwrm_spec_code < 0x10202) {
                if (is_valid_ether_addr(bp->vf.mac_addr))
                        rc = -EADDRNOTAVAIL;
                goto mac_done;
        }
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
        req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
        if (rc && strict) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
                return rc;
        }
        return 0;
}

#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        if (*num_vfs)
                return -EOPNOTSUPP;
        return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        return 0;
}
#endif