/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

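/* Return true if the board index identifies a virtual function (VF) device. */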
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),	\
		    (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
		    (db)->doorbell)

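/* Doorbell helpers: P5 (57500-series) chips use 64-bit NQ doorbells written
 * with bnxt_writeq(); older chips use 32-bit completion ring doorbells
 * written with writel().
 */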
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    RING_CMP(idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

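/* Slow-path work helpers: PFs queue work on the driver's dedicated
 * workqueue, VFs use the system workqueue.
 */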
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

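/* Schedule a slow-path reset after an RX or TX ring error. */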
static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  int idx)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
		   txr->txq_index, bnapi->tx_pkts,
		   txr->tx_cons, txr->tx_prod, idx);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}

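/* TX BD length hint flags, indexed by packet length in 512-byte units. */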
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

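/* Return the CFA action (destination switch port) for packets transmitted on
 * behalf of a VF representor, taken from the skb metadata dst; 0 if none.
 */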
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

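/* Main transmit routine.  Small packets that fit within the push threshold
 * are written directly through the doorbell BAR (TX push); everything else
 * is DMA mapped and described by one or more TX BDs.
 */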
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
					bp->tx_wake_thresh))
			return NETDEV_TX_BUSY;
	}

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto tx_free;

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		WRITE_ONCE(txr->tx_prod, prod);

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_tcp_all_headers(skb);
		else
			hdr_len = skb_tcp_all_headers(skb);

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_txr_db_kick(bp, txr, prod);
	else
		txr->kick_pending = 1;

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_txr_db_kick(bp, txr, prod);

		netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
				   bp->tx_wake_thresh);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

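/* Reclaim completed TX descriptors: unmap buffers, free the skbs and wake
 * the TX queue if it was stopped and enough descriptors are now available.
 */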
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int nr_pkts = bnapi->tx_pkts;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (unlikely(!skb)) {
			bnxt_sched_reset_txr(bp, txr, i);
			return;
		}

		tx_bytes += skb->len;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				/* PTP worker takes ownership of the skb */
				if (!bnxt_get_tx_ts_p5(bp, skb))
					skb = NULL;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		dev_consume_skb_any(skb);
	}

	bnapi->tx_pkts = 0;
	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
				   READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}

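/* RX buffer allocation helpers: __bnxt_alloc_rx_page() takes a page (or page
 * fragment) from the ring's page_pool; __bnxt_alloc_rx_frag() allocates a
 * kernel memory fragment and maps it for DMA.
 */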
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 unsigned int *offset,
					 gfp_t gfp)
{
	struct page *page;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
						BNXT_RX_PAGE_SIZE);
	} else {
		page = page_pool_dev_alloc_pages(rxr->page_pool);
		*offset = 0;
	}
	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + *offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	if (gfp == GFP_ATOMIC)
		data = napi_alloc_frag(bp->rx_buf_size);
	else
		data = netdev_alloc_frag(bp->rx_buf_size);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		skb_free_frag(data);
		data = NULL;
	}
	return data;
}

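/* Allocate an RX buffer and program its DMA address into the RX BD at @prod. */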
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		unsigned int offset;
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

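/* Recycle an RX buffer by moving it from the consumer slot back to the
 * current producer slot of the RX ring.
 */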
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

	if (!page)
		return -ENOMEM;

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

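/* Return the aggregation buffers of a partially processed packet to the
 * aggregation ring so they can be reused.
 */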
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

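/* Build an skb directly around a page-mode RX buffer after replenishing the
 * RX ring.
 */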
static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
{
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);
	skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_offset);
	__skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = napi_build_skb(data, bp->rx_buf_size);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

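/* Attach the aggregation pages of a jumbo/TPA packet as fragments of the
 * skb or xdp_buff shared info, replenishing the aggregation ring as we go.
 */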
static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			       struct bnxt_cp_ring_info *cpr,
			       struct skb_shared_info *shinfo,
			       u16 idx, u32 agg_bufs, bool tpa,
			       struct xdp_buff *xdp)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i, total_frag_len = 0;
	bool p5_tpa = false;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		skb_frag_t *frag = &shinfo->frags[i];
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_frag_fill_page_desc(frag, cons_rx_buf->page,
					cons_rx_buf->offset, frag_len);
		shinfo->nr_frags = i + 1;
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (xdp && page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			--shinfo->nr_frags;
			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
					bp->rx_dir);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
					     struct bnxt_cp_ring_info *cpr,
					     struct sk_buff *skb, u16 idx,
					     u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 total_frag_len = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
					     agg_bufs, tpa, NULL);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}

	skb->data_len += total_frag_len;
	skb->len += total_frag_len;
	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
	return skb;
}

static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 struct xdp_buff *xdp, u16 idx,
				 u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	u32 total_frag_len = 0;

	if (!xdp_buff_has_frags(xdp))
		shinfo->nr_frags = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
					     idx, agg_bufs, tpa, xdp);
	if (total_frag_len) {
		xdp_buff_set_frags_flag(xdp);
		shinfo->nr_frags = agg_bufs;
		shinfo->xdp_frags_size = total_frag_len;
	}
	return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

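/* Copy a small received packet into a freshly allocated skb so the original
 * RX buffer can be reused immediately.
 */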
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

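/* Handle the TPA_START completion: remember the buffer being aggregated in
 * rx_tpa[] and give the hardware a replacement RX buffer.
 */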
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset_rxr(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

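/* The bnxt_gro_func_*() variants finish up hardware GRO (TPA) packets by
 * restoring the network/transport header offsets (and, on some chips, the
 * TCP pseudo-header checksum) expected by the stack; they differ by chip
 * family.
 */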
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

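/* Common GRO completion: set gso_size/gso_type from the TPA_END completion
 * and call the chip-specific bnxt_gro_func to fix up the headers.
 */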
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

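/* Handle the TPA_END completion: build the skb for the aggregated packet,
 * attach any aggregation buffers and perform GRO completion if needed.
 */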
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 *data_ptr, agg_bufs;
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	void *data;
	bool gro;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_END_AGG_ID_P5(tpa_end);
		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
		tpa_info = &rxr->rx_tpa[agg_id];
		if (unlikely(agg_bufs != tpa_info->agg_count)) {
			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
				    agg_bufs, tpa_info->agg_count);
			agg_bufs = tpa_info->agg_count;
		}
		tpa_info->agg_count = 0;
		*event |= BNXT_AGG_EVENT;
		bnxt_free_agg_idx(rxr, agg_id);
		idx = agg_id;
		gro = !!(bp->flags & BNXT_FLAG_GRO);
	} else {
		agg_id = TPA_END_AGG_ID(tpa_end);
		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
		tpa_info = &rxr->rx_tpa[agg_id];
		idx = RING_CMP(*raw_cons);
		if (agg_bufs) {
			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
				return ERR_PTR(-EBUSY);

			*event |= BNXT_AGG_EVENT;
			idx = NEXT_CMP(idx);
		}
		gro = !!TPA_END_GRO(tpa_end);
	}
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			cpr->sw_stats.rx.rx_oom_discards += 1;
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			cpr->sw_stats.rx.rx_oom_discards += 1;
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = napi_build_skb(data, bp->rx_buf_size);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			skb_free_frag(data);
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			cpr->sw_stats.rx.rx_oom_discards += 1;
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
23e4c046 1696 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1697 if (!skb) {
1698 /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
907fd4a2 1699 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1700 return NULL;
1701 }
1702 }
ee5c7fb3
SP
1703
1704 skb->protocol =
1705 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1706
1707 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1708 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1709
8852ddb4 1710 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
a196e96b 1711 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
96bdd4b9
MC
1712 __be16 vlan_proto = htons(tpa_info->metadata >>
1713 RX_CMP_FLAGS2_METADATA_TPID_SFT);
ed7bc602 1714 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1715
96bdd4b9
MC
1716 if (eth_type_vlan(vlan_proto)) {
1717 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1718 } else {
1719 dev_kfree_skb(skb);
1720 return NULL;
1721 }
c0c050c5
MC
1722 }
1723
1724 skb_checksum_none_assert(skb);
1725 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1726 skb->ip_summed = CHECKSUM_UNNECESSARY;
1727 skb->csum_level =
1728 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1729 }
1730
bfcd8d79 1731 if (gro)
309369c9 1732 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1733
1734 return skb;
1735}
1736
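/* Queue a TPA aggregation buffer completion in the per-aggregation
 * array so it can be consumed when the matching TPA_END completion
 * arrives.
 */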
8fe88ce7
MC
1737static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1738 struct rx_agg_cmp *rx_agg)
1739{
1740 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1741 struct bnxt_tpa_info *tpa_info;
1742
ec4d8e7c 1743 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1744 tpa_info = &rxr->rx_tpa[agg_id];
1745 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1746 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1747}
1748
ee5c7fb3
SP
1749static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1750 struct sk_buff *skb)
1751{
1752 if (skb->dev != bp->dev) {
1753 /* this packet belongs to a vf-rep */
1754 bnxt_vf_rep_rx(bp, skb);
1755 return;
1756 }
1757 skb_record_rx_queue(skb, bnapi->index);
86b05508 1758 skb_mark_for_recycle(skb);
ee5c7fb3
SP
1759 napi_gro_receive(&bnapi->napi, skb);
1760}
1761
c0c050c5
MC
1762/* returns the following:
1763 * 1 - 1 packet successfully received
1764 * 0 - successful TPA_START, packet not completed yet
1765 * -EBUSY - completion ring does not have all the agg buffers yet
1766 * -ENOMEM - packet aborted due to out of memory
1767 * -EIO - packet aborted due to hw error indicated in BD
1768 */
e44758b7
MC
1769static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1770 u32 *raw_cons, u8 *event)
c0c050c5 1771{
e44758b7 1772 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1773 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1774 struct net_device *dev = bp->dev;
1775 struct rx_cmp *rxcmp;
1776 struct rx_cmp_ext *rxcmp1;
1777 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1778 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1779 struct bnxt_sw_rx_bd *rx_buf;
1780 unsigned int len;
6bb19474 1781 u8 *data_ptr, agg_bufs, cmp_type;
ee536dcb 1782 bool xdp_active = false;
c0c050c5
MC
1783 dma_addr_t dma_addr;
1784 struct sk_buff *skb;
b231c3f3 1785 struct xdp_buff xdp;
7f5515d1 1786 u32 flags, misc;
6bb19474 1787 void *data;
c0c050c5
MC
1788 int rc = 0;
1789
1790 rxcmp = (struct rx_cmp *)
1791 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1792
8fe88ce7
MC
1793 cmp_type = RX_CMP_TYPE(rxcmp);
1794
1795 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1796 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1797 goto next_rx_no_prod_no_len;
1798 }
1799
c0c050c5
MC
1800 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1801 cp_cons = RING_CMP(tmp_raw_cons);
1802 rxcmp1 = (struct rx_cmp_ext *)
1803 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1804
1805 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1806 return -EBUSY;
1807
828affc2
MC
1808 /* The valid test of the entry must be done first before
1809 * reading any further.
1810 */
1811 dma_rmb();
c0c050c5
MC
1812 prod = rxr->rx_prod;
1813
1814 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1815 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1816 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1817
4e5dbbda 1818 *event |= BNXT_RX_EVENT;
e7e70fa6 1819 goto next_rx_no_prod_no_len;
c0c050c5
MC
1820
1821 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1822 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1823 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1824 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1825
1fac4b2f 1826 if (IS_ERR(skb))
c0c050c5
MC
1827 return -EBUSY;
1828
1829 rc = -ENOMEM;
1830 if (likely(skb)) {
ee5c7fb3 1831 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1832 rc = 1;
1833 }
4e5dbbda 1834 *event |= BNXT_RX_EVENT;
e7e70fa6 1835 goto next_rx_no_prod_no_len;
c0c050c5
MC
1836 }
1837
1838 cons = rxcmp->rx_cmp_opaque;
fa7e2812 1839 if (unlikely(cons != rxr->rx_next_cons)) {
bbd6f0a9 1840 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
fa7e2812 1841
1b5c8b63
MC
1842 /* 0xffff is forced error, don't print it */
1843 if (rxr->rx_next_cons != 0xffff)
1844 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1845 cons, rxr->rx_next_cons);
fea2993a 1846 bnxt_sched_reset_rxr(bp, rxr);
bbd6f0a9
MC
1847 if (rc1)
1848 return rc1;
1849 goto next_rx_no_prod_no_len;
fa7e2812 1850 }
a1b0e4e6
MC
1851 rx_buf = &rxr->rx_buf_ring[cons];
1852 data = rx_buf->data;
1853 data_ptr = rx_buf->data_ptr;
6bb19474 1854 prefetch(data_ptr);
c0c050c5 1855
c61fb99c
MC
1856 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1857 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1858
1859 if (agg_bufs) {
1860 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1861 return -EBUSY;
1862
1863 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1864 *event |= BNXT_AGG_EVENT;
c0c050c5 1865 }
4e5dbbda 1866 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1867
1868 rx_buf->data = NULL;
1869 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1870 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1871
c0c050c5
MC
1872 bnxt_reuse_rx_data(rxr, cons, data);
1873 if (agg_bufs)
4a228a3a
MC
1874 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1875 false);
c0c050c5
MC
1876
1877 rc = -EIO;
8e44e96c 1878 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
9d8b5f05 1879 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
8d4bd96b
MC
1880 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1881 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
8fbf58e1
MC
1882 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1883 rx_err);
fea2993a 1884 bnxt_sched_reset_rxr(bp, rxr);
19b3751f 1885 }
8e44e96c 1886 }
0b397b17 1887 goto next_rx_no_len;
c0c050c5
MC
1888 }
1889
7f5515d1
PC
1890 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1891 len = flags >> RX_CMP_LEN_SHIFT;
11cd119d 1892 dma_addr = rx_buf->mapping;
c0c050c5 1893
b231c3f3 1894 if (bnxt_xdp_attached(bp, rxr)) {
bbfc17e5 1895 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
4c6c123c
AG
1896 if (agg_bufs) {
1897 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
1898 cp_cons, agg_bufs,
1899 false);
1900 if (!frag_len) {
1901 cpr->sw_stats.rx.rx_oom_discards += 1;
1902 rc = -ENOMEM;
1903 goto next_rx;
1904 }
1905 }
ee536dcb
AG
1906 xdp_active = true;
1907 }
1908
9f4b2830 1909 if (xdp_active) {
9b3e6078 1910 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
b231c3f3
AG
1911 rc = 1;
1912 goto next_rx;
1913 }
c6d30e83 1914 }
ee536dcb 1915
c0c050c5 1916 if (len <= bp->rx_copy_thresh) {
6bb19474 1917 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1918 bnxt_reuse_rx_data(rxr, cons, data);
1919 if (!skb) {
a7559bc8
AG
1920 if (agg_bufs) {
1921 if (!xdp_active)
1922 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1923 agg_bufs, false);
1924 else
1925 bnxt_xdp_buff_frags_free(rxr, &xdp);
1926 }
907fd4a2 1927 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1928 rc = -ENOMEM;
1929 goto next_rx;
1930 }
1931 } else {
c61fb99c
MC
1932 u32 payload;
1933
c6d30e83
MC
1934 if (rx_buf->data_ptr == data_ptr)
1935 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1936 else
1937 payload = 0;
6bb19474 1938 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1939 payload | len);
c0c050c5 1940 if (!skb) {
907fd4a2 1941 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1942 rc = -ENOMEM;
1943 goto next_rx;
1944 }
1945 }
1946
1947 if (agg_bufs) {
32861236
AG
1948 if (!xdp_active) {
1949 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
1950 if (!skb) {
1951 cpr->sw_stats.rx.rx_oom_discards += 1;
1952 rc = -ENOMEM;
1953 goto next_rx;
1954 }
1dc4c557
AG
1955 } else {
1956 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
1957 if (!skb) {
1958 /* we should be able to free the old skb here */
a7559bc8 1959 bnxt_xdp_buff_frags_free(rxr, &xdp);
1dc4c557
AG
1960 cpr->sw_stats.rx.rx_oom_discards += 1;
1961 rc = -ENOMEM;
1962 goto next_rx;
1963 }
c0c050c5
MC
1964 }
1965 }
1966
1967 if (RX_CMP_HASH_VALID(rxcmp)) {
1968 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1969 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1970
1971 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1972 if (hash_type != 1 && hash_type != 3)
1973 type = PKT_HASH_TYPE_L3;
1974 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1975 }
1976
ee5c7fb3
SP
1977 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1978 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 1979
8852ddb4
MC
1980 if ((rxcmp1->rx_cmp_flags2 &
1981 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
a196e96b 1982 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
c0c050c5 1983 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 1984 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
96bdd4b9
MC
1985 __be16 vlan_proto = htons(meta_data >>
1986 RX_CMP_FLAGS2_METADATA_TPID_SFT);
c0c050c5 1987
96bdd4b9
MC
1988 if (eth_type_vlan(vlan_proto)) {
1989 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1990 } else {
1991 dev_kfree_skb(skb);
1992 goto next_rx;
1993 }
c0c050c5
MC
1994 }
1995
1996 skb_checksum_none_assert(skb);
1997 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1998 if (dev->features & NETIF_F_RXCSUM) {
1999 skb->ip_summed = CHECKSUM_UNNECESSARY;
2000 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2001 }
2002 } else {
665e350d
SB
2003 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2004 if (dev->features & NETIF_F_RXCSUM)
9d8b5f05 2005 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
665e350d 2006 }
c0c050c5
MC
2007 }
2008
7f5515d1 2009 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
66ed81dc 2010 RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
7f5515d1
PC
2011 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2012 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2013 u64 ns, ts;
2014
2015 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2016 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2017
2018 spin_lock_bh(&ptp->ptp_lock);
2019 ns = timecounter_cyc2time(&ptp->tc, ts);
2020 spin_unlock_bh(&ptp->ptp_lock);
2021 memset(skb_hwtstamps(skb), 0,
2022 sizeof(*skb_hwtstamps(skb)));
2023 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2024 }
2025 }
2026 }
ee5c7fb3 2027 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
2028 rc = 1;
2029
2030next_rx:
6a8788f2
AG
2031 cpr->rx_packets += 1;
2032 cpr->rx_bytes += len;
e7e70fa6 2033
0b397b17
MC
2034next_rx_no_len:
2035 rxr->rx_prod = NEXT_RX(prod);
2036 rxr->rx_next_cons = NEXT_RX(cons);
2037
e7e70fa6 2038next_rx_no_prod_no_len:
c0c050c5
MC
2039 *raw_cons = tmp_raw_cons;
2040
2041 return rc;
2042}
2043
2270bc5d
MC
2044/* In netpoll mode, if we are using a combined completion ring, we need to
2045 * discard the rx packets and recycle the buffers.
2046 */
e44758b7
MC
2047static int bnxt_force_rx_discard(struct bnxt *bp,
2048 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
2049 u32 *raw_cons, u8 *event)
2050{
2270bc5d
MC
2051 u32 tmp_raw_cons = *raw_cons;
2052 struct rx_cmp_ext *rxcmp1;
2053 struct rx_cmp *rxcmp;
2054 u16 cp_cons;
2055 u8 cmp_type;
40bedf7c 2056 int rc;
2270bc5d
MC
2057
2058 cp_cons = RING_CMP(tmp_raw_cons);
2059 rxcmp = (struct rx_cmp *)
2060 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2061
2062 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2063 cp_cons = RING_CMP(tmp_raw_cons);
2064 rxcmp1 = (struct rx_cmp_ext *)
2065 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2066
2067 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2068 return -EBUSY;
2069
828affc2
MC
2070 /* The valid test of the entry must be done first before
2071 * reading any further.
2072 */
2073 dma_rmb();
2270bc5d
MC
2074 cmp_type = RX_CMP_TYPE(rxcmp);
2075 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2076 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2077 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2078 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2079 struct rx_tpa_end_cmp_ext *tpa_end1;
2080
2081 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2082 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2083 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2084 }
40bedf7c
JK
2085 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2086 if (rc && rc != -EBUSY)
2087 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2088 return rc;
2270bc5d
MC
2089}
2090
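/* Read one firmware health/recovery register.  The register map encodes
 * where the value lives (PCI config space, a GRC window, BAR0 or BAR1);
 * the reset-in-progress register is additionally masked.
 */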
7e914027
MC
2091u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2092{
2093 struct bnxt_fw_health *fw_health = bp->fw_health;
2094 u32 reg = fw_health->regs[reg_idx];
2095 u32 reg_type, reg_off, val = 0;
2096
2097 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2098 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2099 switch (reg_type) {
2100 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2101 pci_read_config_dword(bp->pdev, reg_off, &val);
2102 break;
2103 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2104 reg_off = fw_health->mapped_regs[reg_idx];
df561f66 2105 fallthrough;
7e914027
MC
2106 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2107 val = readl(bp->bar0 + reg_off);
2108 break;
2109 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2110 val = readl(bp->bar1 + reg_off);
2111 break;
2112 }
2113 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2114 val &= fw_health->fw_reset_inprog_reg_mask;
2115 return val;
2116}
2117
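/* Map a firmware aggregation ring ID (as reported by a ring monitor
 * async event) back to the driver's ring group index.
 */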
8d4bd96b
MC
2118static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2119{
2120 int i;
2121
2122 for (i = 0; i < bp->rx_nr_rings; i++) {
2123 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2124 struct bnxt_ring_grp_info *grp_info;
2125
2126 grp_info = &bp->grp_info[grp_idx];
2127 if (grp_info->agg_fw_ring_id == ring_id)
2128 return grp_idx;
2129 }
2130 return INVALID_HW_RING_ID;
2131}
2132
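/* Log firmware ERROR_REPORT async events.  The error type is taken from
 * data1; known types (invalid 1PPS signal, pause storm, dropped
 * doorbells) get specific messages, everything else a generic one.
 */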
abf90ac2
PC
2133static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2134{
0fb8582a
MC
2135 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2136
2137 switch (err_type) {
abf90ac2
PC
2138 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2139 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2140 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2141 break;
5a717f4a
SK
2142 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2143 netdev_warn(bp->dev, "Pause Storm detected!\n");
2144 break;
0fb8582a
MC
2145 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2146 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2147 break;
abf90ac2 2148 default:
0fb8582a
MC
2149 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2150 err_type);
abf90ac2
PC
2151 break;
2152 }
2153}
2154
4bb13abf 2155#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
2156 ((data) & \
2157 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 2158
8d4bd96b
MC
2159#define BNXT_EVENT_RING_TYPE(data2) \
2160 ((data2) & \
2161 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2162
2163#define BNXT_EVENT_RING_TYPE_RX(data2) \
2164 (BNXT_EVENT_RING_TYPE(data2) == \
2165 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2166
8bcf6f04
PC
2167#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2168 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2169 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2170
2171#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2172 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2173 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2174
2175#define BNXT_PHC_BITS 48
2176
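/* Decode a firmware async event completion.  Link, reset, error
 * recovery, PTP and echo events set the appropriate sp_event bits or
 * update driver state; events that need slow-path processing fall
 * through to __bnxt_queue_sp_work().
 */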
c0c050c5
MC
2177static int bnxt_async_event_process(struct bnxt *bp,
2178 struct hwrm_async_event_cmpl *cmpl)
2179{
2180 u16 event_id = le16_to_cpu(cmpl->event_id);
03ab8ca1
MC
2181 u32 data1 = le32_to_cpu(cmpl->event_data1);
2182 u32 data2 = le32_to_cpu(cmpl->event_data2);
c0c050c5 2183
8fa4219d
EP
2184 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2185 event_id, data1, data2);
2186
c0c050c5
MC
2187 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2188 switch (event_id) {
87c374de 2189 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
2190 struct bnxt_link_info *link_info = &bp->link_info;
2191
2192 if (BNXT_VF(bp))
2193 goto async_event_process_exit;
a8168b6c
MC
2194
2195 /* print unsupported speed warning in forced speed mode only */
2196 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2197 (data1 & 0x20000)) {
8cbde117
MC
2198 u16 fw_speed = link_info->force_link_speed;
2199 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2200
a8168b6c
MC
2201 if (speed != SPEED_UNKNOWN)
2202 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2203 speed);
8cbde117 2204 }
286ef9d6 2205 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 2206 }
df561f66 2207 fallthrough;
b1613e78
MC
2208 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2209 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2210 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
df561f66 2211 fallthrough;
87c374de 2212 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 2213 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 2214 break;
87c374de 2215 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 2216 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 2217 break;
87c374de 2218 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
2219 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2220
2221 if (BNXT_VF(bp))
2222 break;
2223
2224 if (bp->pf.port_id != port_id)
2225 break;
2226
4bb13abf
MC
2227 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2228 break;
2229 }
87c374de 2230 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
2231 if (BNXT_PF(bp))
2232 goto async_event_process_exit;
2233 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2234 break;
5863b10a 2235 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
aadb0b1a 2236 char *type_str = "Solicited";
5863b10a 2237
8280b38e
VV
2238 if (!bp->fw_health)
2239 goto async_event_process_exit;
2240
2151fe08
MC
2241 bp->fw_reset_timestamp = jiffies;
2242 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2243 if (!bp->fw_reset_min_dsecs)
2244 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2245 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2246 if (!bp->fw_reset_max_dsecs)
2247 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
8f6c5e4d
EP
2248 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2249 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2250 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
aadb0b1a 2251 type_str = "Fatal";
8cc95ceb 2252 bp->fw_health->fatalities++;
acfb50e4 2253 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
aadb0b1a
EP
2254 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2255 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2256 type_str = "Non-fatal";
8cc95ceb 2257 bp->fw_health->survivals++;
aadb0b1a 2258 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
5863b10a 2259 }
871127e6 2260 netif_warn(bp, hw, bp->dev,
aadb0b1a
EP
2261 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2262 type_str, data1, data2,
871127e6
MC
2263 bp->fw_reset_min_dsecs * 100,
2264 bp->fw_reset_max_dsecs * 100);
2151fe08
MC
2265 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2266 break;
5863b10a 2267 }
7e914027
MC
2268 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2269 struct bnxt_fw_health *fw_health = bp->fw_health;
1596847d
EP
2270 char *status_desc = "healthy";
2271 u32 status;
7e914027
MC
2272
2273 if (!fw_health)
2274 goto async_event_process_exit;
2275
1b2b9183
MC
2276 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2277 fw_health->enabled = false;
1596847d 2278 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
7e914027 2279 break;
f4d95c3c 2280 }
1596847d 2281 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
7e914027
MC
2282 fw_health->tmr_multiplier =
2283 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2284 bp->current_interval * 10);
2285 fw_health->tmr_counter = fw_health->tmr_multiplier;
eca4cf12 2286 if (!fw_health->enabled)
1b2b9183
MC
2287 fw_health->last_fw_heartbeat =
2288 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
eca4cf12
MC
2289 fw_health->last_fw_reset_cnt =
2290 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1596847d
EP
2291 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2292 if (status != BNXT_FW_STATUS_HEALTHY)
2293 status_desc = "unhealthy";
f4d95c3c 2294 netif_info(bp, drv, bp->dev,
1596847d
EP
2295 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2296 fw_health->primary ? "primary" : "backup", status,
2297 status_desc, fw_health->last_fw_reset_cnt);
1b2b9183
MC
2298 if (!fw_health->enabled) {
2299 /* Make sure tmr_counter is set and visible to
2300 * bnxt_health_check() before setting enabled to true.
2301 */
2302 smp_wmb();
2303 fw_health->enabled = true;
2304 }
7e914027
MC
2305 goto async_event_process_exit;
2306 }
a44daa8f 2307 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
871127e6
MC
2308 netif_notice(bp, hw, bp->dev,
2309 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2310 data1, data2);
a44daa8f 2311 goto async_event_process_exit;
8d4bd96b 2312 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
8d4bd96b
MC
2313 struct bnxt_rx_ring_info *rxr;
2314 u16 grp_idx;
2315
2316 if (bp->flags & BNXT_FLAG_CHIP_P5)
2317 goto async_event_process_exit;
2318
2319 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2320 BNXT_EVENT_RING_TYPE(data2), data1);
2321 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2322 goto async_event_process_exit;
2323
2324 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2325 if (grp_idx == INVALID_HW_RING_ID) {
2326 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2327 data1);
2328 goto async_event_process_exit;
2329 }
2330 rxr = bp->bnapi[grp_idx]->rx_ring;
fea2993a 2331 bnxt_sched_reset_rxr(bp, rxr);
8d4bd96b
MC
2332 goto async_event_process_exit;
2333 }
df97b34d
MC
2334 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2335 struct bnxt_fw_health *fw_health = bp->fw_health;
2336
2337 netif_notice(bp, hw, bp->dev,
2338 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2339 data1, data2);
2340 if (fw_health) {
2341 fw_health->echo_req_data1 = data1;
2342 fw_health->echo_req_data2 = data2;
2343 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2344 break;
2345 }
2346 goto async_event_process_exit;
2347 }
099fdeda
PC
2348 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2349 bnxt_ptp_pps_event(bp, data1, data2);
abf90ac2
PC
2350 goto async_event_process_exit;
2351 }
2352 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2353 bnxt_event_error_report(bp, data1, data2);
099fdeda
PC
2354 goto async_event_process_exit;
2355 }
8bcf6f04
PC
2356 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2357 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2358 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
8c154d27 2359 if (BNXT_PTP_USE_RTC(bp)) {
8bcf6f04
PC
2360 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2361 u64 ns;
2362
319a7827
PC
2363 if (!ptp)
2364 goto async_event_process_exit;
2365
8bcf6f04
PC
2366 spin_lock_bh(&ptp->ptp_lock);
2367 bnxt_ptp_update_current_time(bp);
2368 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2369 BNXT_PHC_BITS) | ptp->current_time);
2370 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2371 spin_unlock_bh(&ptp->ptp_lock);
2372 }
2373 break;
2374 }
2375 goto async_event_process_exit;
2376 }
68f684e2
EP
2377 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2378 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2379
2380 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2381 goto async_event_process_exit;
2382 }
c0c050c5 2383 default:
19241368 2384 goto async_event_process_exit;
c0c050c5 2385 }
9b1a00fd 2386 __bnxt_queue_sp_work(bp);
19241368 2387async_event_process_exit:
c0c050c5
MC
2388 return 0;
2389}
2390
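/* Dispatch an HWRM-related completion: HWRM_DONE completes the pending
 * request token, FWD_REQ validates the VF ID and schedules the forward
 * work, and async events are passed to bnxt_async_event_process().
 */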
2391static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2392{
2393 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2394 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2395 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2396 (struct hwrm_fwd_req_cmpl *)txcmp;
2397
2398 switch (cmpl_type) {
2399 case CMPL_BASE_TYPE_HWRM_DONE:
2400 seq_id = le16_to_cpu(h_cmpl->sequence_id);
68f684e2 2401 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
c0c050c5
MC
2402 break;
2403
2404 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2405 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2406
2407 if ((vf_id < bp->pf.first_vf_id) ||
2408 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2409 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2410 vf_id);
2411 return -EINVAL;
2412 }
2413
2414 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
9b1a00fd 2415 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
c0c050c5
MC
2416 break;
2417
2418 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2419 bnxt_async_event_process(bp,
2420 (struct hwrm_async_event_cmpl *)txcmp);
cc9fd180 2421 break;
c0c050c5
MC
2422
2423 default:
2424 break;
2425 }
2426
2427 return 0;
2428}
2429
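/* MSI-X interrupt handler: count the event, prefetch the next
 * completion descriptor and schedule NAPI for this ring.
 */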
2430static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2431{
2432 struct bnxt_napi *bnapi = dev_instance;
2433 struct bnxt *bp = bnapi->bp;
2434 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2435 u32 cons = RING_CMP(cpr->cp_raw_cons);
2436
6a8788f2 2437 cpr->event_ctr++;
c0c050c5
MC
2438 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2439 napi_schedule(&bnapi->napi);
2440 return IRQ_HANDLED;
2441}
2442
2443static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2444{
2445 u32 raw_cons = cpr->cp_raw_cons;
2446 u16 cons = RING_CMP(raw_cons);
2447 struct tx_cmp *txcmp;
2448
2449 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2450
2451 return TX_CMP_VALID(txcmp, raw_cons);
2452}
2453
c0c050c5
MC
2454static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2455{
2456 struct bnxt_napi *bnapi = dev_instance;
2457 struct bnxt *bp = bnapi->bp;
2458 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2459 u32 cons = RING_CMP(cpr->cp_raw_cons);
2460 u32 int_status;
2461
2462 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2463
2464 if (!bnxt_has_work(bp, cpr)) {
11809490 2465 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2466 /* return if erroneous interrupt */
2467 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2468 return IRQ_NONE;
2469 }
2470
2471 /* disable ring IRQ */
697197e5 2472 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2473
2474 /* Return here if interrupt is shared and is disabled. */
2475 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2476 return IRQ_HANDLED;
2477
2478 napi_schedule(&bnapi->napi);
2479 return IRQ_HANDLED;
2480}
2481
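/* Core completion ring poll loop.  Walks TX, RX and HWRM completions up
 * to the NAPI budget; TX completions are counted for later reclaim and
 * RX/AGG events are recorded so __bnxt_poll_work_done() can ring the
 * producer doorbells.
 */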
3675b92f
MC
2482static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2483 int budget)
c0c050c5 2484{
e44758b7 2485 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2486 u32 raw_cons = cpr->cp_raw_cons;
2487 u32 cons;
2488 int tx_pkts = 0;
2489 int rx_pkts = 0;
4e5dbbda 2490 u8 event = 0;
c0c050c5
MC
2491 struct tx_cmp *txcmp;
2492
0fcec985 2493 cpr->has_more_work = 0;
340ac85e 2494 cpr->had_work_done = 1;
c0c050c5
MC
2495 while (1) {
2496 int rc;
2497
2498 cons = RING_CMP(raw_cons);
2499 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2500
2501 if (!TX_CMP_VALID(txcmp, raw_cons))
2502 break;
2503
67a95e20
MC
2504 /* The valid test of the entry must be done first before
2505 * reading any further.
2506 */
b67daab0 2507 dma_rmb();
c0c050c5
MC
2508 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2509 tx_pkts++;
2510 /* return full budget so NAPI will complete. */
5bed8b07 2511 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
c0c050c5 2512 rx_pkts = budget;
73f21c65 2513 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2514 if (budget)
2515 cpr->has_more_work = 1;
73f21c65
MC
2516 break;
2517 }
c0c050c5 2518 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2519 if (likely(budget))
e44758b7 2520 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2521 else
e44758b7 2522 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2523 &event);
c0c050c5
MC
2524 if (likely(rc >= 0))
2525 rx_pkts += rc;
903649e7
MC
2526 /* Increment rx_pkts when rc is -ENOMEM to count towards
2527 * the NAPI budget. Otherwise, we may potentially loop
2528 * here forever if we consistently cannot allocate
2529 * buffers.
2530 */
2edbdb31 2531 else if (rc == -ENOMEM && budget)
903649e7 2532 rx_pkts++;
c0c050c5
MC
2533 else if (rc == -EBUSY) /* partial completion */
2534 break;
c0c050c5
MC
2535 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2536 CMPL_BASE_TYPE_HWRM_DONE) ||
2537 (TX_CMP_TYPE(txcmp) ==
2538 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2539 (TX_CMP_TYPE(txcmp) ==
2540 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2541 bnxt_hwrm_handler(bp, txcmp);
2542 }
2543 raw_cons = NEXT_RAW_CMP(raw_cons);
2544
0fcec985
MC
2545 if (rx_pkts && rx_pkts == budget) {
2546 cpr->has_more_work = 1;
c0c050c5 2547 break;
0fcec985 2548 }
c0c050c5
MC
2549 }
2550
f18c2b77 2551 if (event & BNXT_REDIRECT_EVENT)
b976969b 2552 xdp_do_flush();
f18c2b77 2553
38413406
MC
2554 if (event & BNXT_TX_EVENT) {
2555 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2556 u16 prod = txr->tx_prod;
2557
2558 /* Sync BD data before updating doorbell */
2559 wmb();
2560
697197e5 2561 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2562 }
2563
c0c050c5 2564 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2565 bnapi->tx_pkts += tx_pkts;
2566 bnapi->events |= event;
2567 return rx_pkts;
2568}
c0c050c5 2569
37b61cda
JK
2570static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2571 int budget)
3675b92f 2572{
35b1b1fd 2573 if (bnapi->tx_pkts && !bnapi->tx_fault)
37b61cda 2574 bnapi->tx_int(bp, bnapi, budget);
c0c050c5 2575
8fbf58e1 2576 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
b6ab4b01 2577 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2578
e8f267b0 2579 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
c0c050c5 2580 }
a7559bc8
AG
2581 if (bnapi->events & BNXT_AGG_EVENT) {
2582 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2583
2584 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2585 }
3675b92f
MC
2586 bnapi->events = 0;
2587}
2588
2589static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2590 int budget)
2591{
2592 struct bnxt_napi *bnapi = cpr->bnapi;
2593 int rx_pkts;
2594
2595 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2596
2597 /* ACK completion ring before freeing tx ring and producing new
2598 * buffers in rx/agg rings to prevent overflowing the completion
2599 * ring.
2600 */
2601 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2602
37b61cda 2603 __bnxt_poll_work_done(bp, bnapi, budget);
c0c050c5
MC
2604 return rx_pkts;
2605}
2606
10bbdaf5
PS
2607static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2608{
2609 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2610 struct bnxt *bp = bnapi->bp;
2611 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2612 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2613 struct tx_cmp *txcmp;
2614 struct rx_cmp_ext *rxcmp1;
2615 u32 cp_cons, tmp_raw_cons;
2616 u32 raw_cons = cpr->cp_raw_cons;
edc0140c 2617 bool flush_xdp = false;
10bbdaf5 2618 u32 rx_pkts = 0;
4e5dbbda 2619 u8 event = 0;
10bbdaf5
PS
2620
2621 while (1) {
2622 int rc;
2623
2624 cp_cons = RING_CMP(raw_cons);
2625 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2626
2627 if (!TX_CMP_VALID(txcmp, raw_cons))
2628 break;
2629
828affc2
MC
2630 /* The valid test of the entry must be done first before
2631 * reading any further.
2632 */
2633 dma_rmb();
10bbdaf5
PS
2634 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2635 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2636 cp_cons = RING_CMP(tmp_raw_cons);
2637 rxcmp1 = (struct rx_cmp_ext *)
2638 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2639
2640 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2641 break;
2642
2643 /* force an error to recycle the buffer */
2644 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2645 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2646
e44758b7 2647 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2648 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2649 rx_pkts++;
2650 else if (rc == -EBUSY) /* partial completion */
2651 break;
edc0140c
SAS
2652 if (event & BNXT_REDIRECT_EVENT)
2653 flush_xdp = true;
10bbdaf5
PS
2654 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2655 CMPL_BASE_TYPE_HWRM_DONE)) {
2656 bnxt_hwrm_handler(bp, txcmp);
2657 } else {
2658 netdev_err(bp->dev,
2659 "Invalid completion received on special ring\n");
2660 }
2661 raw_cons = NEXT_RAW_CMP(raw_cons);
2662
2663 if (rx_pkts == budget)
2664 break;
2665 }
2666
2667 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2668 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2669 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2670
434c975a 2671 if (event & BNXT_AGG_EVENT)
697197e5 2672 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
edc0140c
SAS
2673 if (flush_xdp)
2674 xdp_do_flush();
10bbdaf5
PS
2675
2676 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2677 napi_complete_done(napi, rx_pkts);
697197e5 2678 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2679 }
2680 return rx_pkts;
2681}
2682
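/* NAPI poll handler for chips with a single combined completion ring
 * per vector: drains the ring via bnxt_poll_work(), re-arms the CQ
 * doorbell on completion, and feeds DIM with the accumulated stats when
 * adaptive interrupt moderation is enabled.
 */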
c0c050c5
MC
2683static int bnxt_poll(struct napi_struct *napi, int budget)
2684{
2685 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2686 struct bnxt *bp = bnapi->bp;
2687 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2688 int work_done = 0;
2689
0da65f49
MC
2690 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2691 napi_complete(napi);
2692 return 0;
2693 }
c0c050c5 2694 while (1) {
e44758b7 2695 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2696
73f21c65
MC
2697 if (work_done >= budget) {
2698 if (!budget)
697197e5 2699 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2700 break;
73f21c65 2701 }
c0c050c5
MC
2702
2703 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2704 if (napi_complete_done(napi, work_done))
697197e5 2705 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2706 break;
2707 }
2708 }
6a8788f2 2709 if (bp->flags & BNXT_FLAG_DIM) {
f06d0ca4 2710 struct dim_sample dim_sample = {};
6a8788f2 2711
8960b389
TG
2712 dim_update_sample(cpr->event_ctr,
2713 cpr->rx_packets,
2714 cpr->rx_bytes,
2715 &dim_sample);
6a8788f2
AG
2716 net_dim(&cpr->dim, dim_sample);
2717 }
c0c050c5
MC
2718 return work_done;
2719}
2720
0fcec985
MC
2721static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2722{
2723 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2724 int i, work_done = 0;
2725
2726 for (i = 0; i < 2; i++) {
2727 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2728
2729 if (cpr2) {
2730 work_done += __bnxt_poll_work(bp, cpr2,
2731 budget - work_done);
2732 cpr->has_more_work |= cpr2->has_more_work;
2733 }
2734 }
2735 return work_done;
2736}
2737
2738static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
37b61cda 2739 u64 dbr_type, int budget)
0fcec985
MC
2740{
2741 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2742 int i;
2743
2744 for (i = 0; i < 2; i++) {
2745 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2746 struct bnxt_db_info *db;
2747
340ac85e 2748 if (cpr2 && cpr2->had_work_done) {
0fcec985 2749 db = &cpr2->cp_db;
c6132f6f
MC
2750 bnxt_writeq(bp, db->db_key64 | dbr_type |
2751 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
0fcec985
MC
2752 cpr2->had_work_done = 0;
2753 }
2754 }
37b61cda 2755 __bnxt_poll_work_done(bp, bnapi, budget);
0fcec985
MC
2756}
2757
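/* NAPI poll for P5 chips, which use a notification queue (NQ).  Each NQ
 * entry points at a completion ring (TX or RX) that is drained with
 * __bnxt_poll_work(); CQ and NQ doorbells are re-armed once all work
 * within the budget is done.
 */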
2758static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2759{
2760 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2761 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
dc1f5d1e 2762 struct bnxt_cp_ring_info *cpr_rx;
0fcec985
MC
2763 u32 raw_cons = cpr->cp_raw_cons;
2764 struct bnxt *bp = bnapi->bp;
2765 struct nqe_cn *nqcmp;
2766 int work_done = 0;
2767 u32 cons;
2768
0da65f49
MC
2769 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2770 napi_complete(napi);
2771 return 0;
2772 }
0fcec985
MC
2773 if (cpr->has_more_work) {
2774 cpr->has_more_work = 0;
2775 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
0fcec985
MC
2776 }
2777 while (1) {
2778 cons = RING_CMP(raw_cons);
2779 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2780
2781 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
54a9062f
MC
2782 if (cpr->has_more_work)
2783 break;
2784
37b61cda
JK
2785 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2786 budget);
0fcec985
MC
2787 cpr->cp_raw_cons = raw_cons;
2788 if (napi_complete_done(napi, work_done))
2789 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2790 cpr->cp_raw_cons);
dc1f5d1e 2791 goto poll_done;
0fcec985
MC
2792 }
2793
2794 /* The valid test of the entry must be done first before
2795 * reading any further.
2796 */
2797 dma_rmb();
2798
2799 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2800 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2801 struct bnxt_cp_ring_info *cpr2;
2802
195af579
MC
2803 /* No more budget for RX work */
2804 if (budget && work_done >= budget && idx == BNXT_RX_HDL)
2805 break;
2806
0fcec985
MC
2807 cpr2 = cpr->cp_ring_arr[idx];
2808 work_done += __bnxt_poll_work(bp, cpr2,
2809 budget - work_done);
54a9062f 2810 cpr->has_more_work |= cpr2->has_more_work;
0fcec985
MC
2811 } else {
2812 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2813 }
2814 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985 2815 }
37b61cda 2816 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
389a877a
MC
2817 if (raw_cons != cpr->cp_raw_cons) {
2818 cpr->cp_raw_cons = raw_cons;
2819 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2820 }
dc1f5d1e
AG
2821poll_done:
2822 cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
2823 if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
2824 struct dim_sample dim_sample = {};
2825
2826 dim_update_sample(cpr->event_ctr,
2827 cpr_rx->rx_packets,
2828 cpr_rx->rx_bytes,
2829 &dim_sample);
2830 net_dim(&cpr->dim, dim_sample);
2831 }
0fcec985
MC
2832 return work_done;
2833}
2834
c0c050c5
MC
2835static void bnxt_free_tx_skbs(struct bnxt *bp)
2836{
2837 int i, max_idx;
2838 struct pci_dev *pdev = bp->pdev;
2839
b6ab4b01 2840 if (!bp->tx_ring)
c0c050c5
MC
2841 return;
2842
2843 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2844 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2845 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2846 int j;
2847
1affc01f
EP
2848 if (!txr->tx_buf_ring)
2849 continue;
2850
c0c050c5
MC
2851 for (j = 0; j < max_idx;) {
2852 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2853 struct sk_buff *skb;
c0c050c5
MC
2854 int k, last;
2855
f18c2b77
AG
2856 if (i < bp->tx_nr_rings_xdp &&
2857 tx_buf->action == XDP_REDIRECT) {
2858 dma_unmap_single(&pdev->dev,
2859 dma_unmap_addr(tx_buf, mapping),
2860 dma_unmap_len(tx_buf, len),
df70303d 2861 DMA_TO_DEVICE);
f18c2b77
AG
2862 xdp_return_frame(tx_buf->xdpf);
2863 tx_buf->action = 0;
2864 tx_buf->xdpf = NULL;
2865 j++;
2866 continue;
2867 }
2868
2869 skb = tx_buf->skb;
c0c050c5
MC
2870 if (!skb) {
2871 j++;
2872 continue;
2873 }
2874
2875 tx_buf->skb = NULL;
2876
2877 if (tx_buf->is_push) {
2878 dev_kfree_skb(skb);
2879 j += 2;
2880 continue;
2881 }
2882
2883 dma_unmap_single(&pdev->dev,
2884 dma_unmap_addr(tx_buf, mapping),
2885 skb_headlen(skb),
df70303d 2886 DMA_TO_DEVICE);
c0c050c5
MC
2887
2888 last = tx_buf->nr_frags;
2889 j += 2;
d612a579
MC
2890 for (k = 0; k < last; k++, j++) {
2891 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2892 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2893
d612a579 2894 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2895 dma_unmap_page(
2896 &pdev->dev,
2897 dma_unmap_addr(tx_buf, mapping),
df70303d 2898 skb_frag_size(frag), DMA_TO_DEVICE);
c0c050c5
MC
2899 }
2900 dev_kfree_skb(skb);
2901 }
2902 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2903 }
2904}
2905
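/* Release every buffer still held by one RX ring: TPA data buffers,
 * normal RX buffers and aggregation pages, returning page pool pages to
 * the pool and clearing the TPA agg ID bitmap.
 */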
975bc99a 2906static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
c0c050c5 2907{
975bc99a 2908 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 2909 struct pci_dev *pdev = bp->pdev;
975bc99a
MC
2910 struct bnxt_tpa_idx_map *map;
2911 int i, max_idx, max_agg_idx;
c0c050c5
MC
2912
2913 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2914 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
975bc99a
MC
2915 if (!rxr->rx_tpa)
2916 goto skip_rx_tpa_free;
c0c050c5 2917
975bc99a
MC
2918 for (i = 0; i < bp->max_tpa; i++) {
2919 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2920 u8 *data = tpa_info->data;
c0c050c5 2921
975bc99a
MC
2922 if (!data)
2923 continue;
c0c050c5 2924
975bc99a
MC
2925 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2926 bp->rx_buf_use_size, bp->rx_dir,
2927 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2928
975bc99a 2929 tpa_info->data = NULL;
c0c050c5 2930
720908e5 2931 skb_free_frag(data);
975bc99a 2932 }
c0c050c5 2933
975bc99a 2934skip_rx_tpa_free:
1affc01f
EP
2935 if (!rxr->rx_buf_ring)
2936 goto skip_rx_buf_free;
2937
975bc99a
MC
2938 for (i = 0; i < max_idx; i++) {
2939 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2940 dma_addr_t mapping = rx_buf->mapping;
2941 void *data = rx_buf->data;
c0c050c5 2942
975bc99a
MC
2943 if (!data)
2944 continue;
c0c050c5 2945
975bc99a
MC
2946 rx_buf->data = NULL;
2947 if (BNXT_RX_PAGE_MODE(bp)) {
975bc99a
MC
2948 page_pool_recycle_direct(rxr->page_pool, data);
2949 } else {
2950 dma_unmap_single_attrs(&pdev->dev, mapping,
2951 bp->rx_buf_use_size, bp->rx_dir,
2952 DMA_ATTR_WEAK_ORDERING);
720908e5 2953 skb_free_frag(data);
c0c050c5 2954 }
975bc99a 2955 }
1affc01f
EP
2956
2957skip_rx_buf_free:
2958 if (!rxr->rx_agg_ring)
2959 goto skip_rx_agg_free;
2960
975bc99a
MC
2961 for (i = 0; i < max_agg_idx; i++) {
2962 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2963 struct page *page = rx_agg_buf->page;
c0c050c5 2964
975bc99a
MC
2965 if (!page)
2966 continue;
c0c050c5 2967
86b05508
SK
2968 rx_agg_buf->page = NULL;
2969 __clear_bit(i, rxr->rx_agg_bmap);
c0c050c5 2970
86b05508 2971 page_pool_recycle_direct(rxr->page_pool, page);
975bc99a 2972 }
1affc01f
EP
2973
2974skip_rx_agg_free:
975bc99a
MC
2975 map = rxr->rx_tpa_idx_map;
2976 if (map)
2977 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2978}
2979
2980static void bnxt_free_rx_skbs(struct bnxt *bp)
2981{
2982 int i;
2983
2984 if (!bp->rx_ring)
2985 return;
2986
2987 for (i = 0; i < bp->rx_nr_rings; i++)
2988 bnxt_free_one_rx_ring_skbs(bp, i);
c0c050c5
MC
2989}
2990
2991static void bnxt_free_skbs(struct bnxt *bp)
2992{
2993 bnxt_free_tx_skbs(bp);
2994 bnxt_free_rx_skbs(bp);
2995}
2996
41435c39
MC
2997static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2998{
2999 u8 init_val = mem_init->init_val;
3000 u16 offset = mem_init->offset;
3001 u8 *p2 = p;
3002 int i;
3003
3004 if (!init_val)
3005 return;
3006 if (offset == BNXT_MEM_INVALID_OFFSET) {
3007 memset(p, init_val, len);
3008 return;
3009 }
3010 for (i = 0; i < len; i += mem_init->size)
3011 *(p2 + i + offset) = init_val;
3012}
3013
6fe19886 3014static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
3015{
3016 struct pci_dev *pdev = bp->pdev;
3017 int i;
3018
985941e1
MC
3019 if (!rmem->pg_arr)
3020 goto skip_pages;
3021
6fe19886
MC
3022 for (i = 0; i < rmem->nr_pages; i++) {
3023 if (!rmem->pg_arr[i])
c0c050c5
MC
3024 continue;
3025
6fe19886
MC
3026 dma_free_coherent(&pdev->dev, rmem->page_size,
3027 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 3028
6fe19886 3029 rmem->pg_arr[i] = NULL;
c0c050c5 3030 }
985941e1 3031skip_pages:
6fe19886 3032 if (rmem->pg_tbl) {
4f49b2b8
MC
3033 size_t pg_tbl_size = rmem->nr_pages * 8;
3034
3035 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3036 pg_tbl_size = rmem->page_size;
3037 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
3038 rmem->pg_tbl, rmem->pg_tbl_map);
3039 rmem->pg_tbl = NULL;
c0c050c5 3040 }
6fe19886
MC
3041 if (rmem->vmem_size && *rmem->vmem) {
3042 vfree(*rmem->vmem);
3043 *rmem->vmem = NULL;
c0c050c5
MC
3044 }
3045}
3046
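/* Allocate the DMA-coherent pages backing a hardware ring, plus the
 * optional page table (with PTU PTE valid/last bits when required) and
 * the vmalloc'ed software ring described by the ring_mem info.
 */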
6fe19886 3047static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 3048{
c0c050c5 3049 struct pci_dev *pdev = bp->pdev;
66cca20a 3050 u64 valid_bit = 0;
6fe19886 3051 int i;
c0c050c5 3052
66cca20a
MC
3053 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3054 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
3055 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3056 size_t pg_tbl_size = rmem->nr_pages * 8;
3057
3058 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3059 pg_tbl_size = rmem->page_size;
3060 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 3061 &rmem->pg_tbl_map,
c0c050c5 3062 GFP_KERNEL);
6fe19886 3063 if (!rmem->pg_tbl)
c0c050c5
MC
3064 return -ENOMEM;
3065 }
3066
6fe19886 3067 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
3068 u64 extra_bits = valid_bit;
3069
6fe19886
MC
3070 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3071 rmem->page_size,
3072 &rmem->dma_arr[i],
c0c050c5 3073 GFP_KERNEL);
6fe19886 3074 if (!rmem->pg_arr[i])
c0c050c5
MC
3075 return -ENOMEM;
3076
41435c39
MC
3077 if (rmem->mem_init)
3078 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
3079 rmem->page_size);
4f49b2b8 3080 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
3081 if (i == rmem->nr_pages - 2 &&
3082 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3083 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3084 else if (i == rmem->nr_pages - 1 &&
3085 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3086 extra_bits |= PTU_PTE_LAST;
3087 rmem->pg_tbl[i] =
3088 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3089 }
c0c050c5
MC
3090 }
3091
6fe19886
MC
3092 if (rmem->vmem_size) {
3093 *rmem->vmem = vzalloc(rmem->vmem_size);
3094 if (!(*rmem->vmem))
c0c050c5
MC
3095 return -ENOMEM;
3096 }
3097 return 0;
3098}
3099
4a228a3a
MC
3100static void bnxt_free_tpa_info(struct bnxt *bp)
3101{
accd7e23 3102 int i, j;
4a228a3a
MC
3103
3104 for (i = 0; i < bp->rx_nr_rings; i++) {
3105 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3106
ec4d8e7c
MC
3107 kfree(rxr->rx_tpa_idx_map);
3108 rxr->rx_tpa_idx_map = NULL;
79632e9b 3109 if (rxr->rx_tpa) {
accd7e23
MC
3110 for (j = 0; j < bp->max_tpa; j++) {
3111 kfree(rxr->rx_tpa[j].agg_arr);
3112 rxr->rx_tpa[j].agg_arr = NULL;
3113 }
79632e9b 3114 }
4a228a3a
MC
3115 kfree(rxr->rx_tpa);
3116 rxr->rx_tpa = NULL;
3117 }
3118}
3119
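/* Allocate per-ring TPA bookkeeping: the bnxt_tpa_info array and, on
 * P5 chips, the per-aggregation completion arrays and agg ID map.
 */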
3120static int bnxt_alloc_tpa_info(struct bnxt *bp)
3121{
accd7e23 3122 int i, j;
79632e9b
MC
3123
3124 bp->max_tpa = MAX_TPA;
3125 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3126 if (!bp->max_tpa_v2)
3127 return 0;
3128 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
79632e9b 3129 }
4a228a3a
MC
3130
3131 for (i = 0; i < bp->rx_nr_rings; i++) {
3132 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 3133 struct rx_agg_cmp *agg;
4a228a3a 3134
79632e9b 3135 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
3136 GFP_KERNEL);
3137 if (!rxr->rx_tpa)
3138 return -ENOMEM;
79632e9b
MC
3139
3140 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3141 continue;
accd7e23
MC
3142 for (j = 0; j < bp->max_tpa; j++) {
3143 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3144 if (!agg)
3145 return -ENOMEM;
3146 rxr->rx_tpa[j].agg_arr = agg;
3147 }
ec4d8e7c
MC
3148 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3149 GFP_KERNEL);
3150 if (!rxr->rx_tpa_idx_map)
3151 return -ENOMEM;
4a228a3a
MC
3152 }
3153 return 0;
3154}
3155
c0c050c5
MC
3156static void bnxt_free_rx_rings(struct bnxt *bp)
3157{
3158 int i;
3159
b6ab4b01 3160 if (!bp->rx_ring)
c0c050c5
MC
3161 return;
3162
4a228a3a 3163 bnxt_free_tpa_info(bp);
c0c050c5 3164 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3165 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3166 struct bnxt_ring_struct *ring;
3167
c6d30e83
MC
3168 if (rxr->xdp_prog)
3169 bpf_prog_put(rxr->xdp_prog);
3170
96a8604f
JDB
3171 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3172 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3173
12479f62 3174 page_pool_destroy(rxr->page_pool);
322b87ca
AG
3175 rxr->page_pool = NULL;
3176
c0c050c5
MC
3177 kfree(rxr->rx_agg_bmap);
3178 rxr->rx_agg_bmap = NULL;
3179
3180 ring = &rxr->rx_ring_struct;
6fe19886 3181 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3182
3183 ring = &rxr->rx_agg_ring_struct;
6fe19886 3184 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3185 }
3186}
3187
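/* Create the per-RX-ring page pool used for aggregation pages (and for
 * data buffers in page mode); the pool takes care of DMA mapping and
 * device syncing of recycled pages.
 */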
322b87ca
AG
3188static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3189 struct bnxt_rx_ring_info *rxr)
3190{
3191 struct page_pool_params pp = { 0 };
3192
86b05508
SK
3193 pp.pool_size = bp->rx_agg_ring_size;
3194 if (BNXT_RX_PAGE_MODE(bp))
3195 pp.pool_size += bp->rx_ring_size;
322b87ca 3196 pp.nid = dev_to_node(&bp->pdev->dev);
294e39e0 3197 pp.napi = &rxr->bnapi->napi;
322b87ca 3198 pp.dev = &bp->pdev->dev;
578fcfd2
SK
3199 pp.dma_dir = bp->rx_dir;
3200 pp.max_len = PAGE_SIZE;
3201 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
f6974b4c
SK
3202 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
3203 pp.flags |= PP_FLAG_PAGE_FRAG;
322b87ca
AG
3204
3205 rxr->page_pool = page_pool_create(&pp);
3206 if (IS_ERR(rxr->page_pool)) {
3207 int err = PTR_ERR(rxr->page_pool);
3208
3209 rxr->page_pool = NULL;
3210 return err;
3211 }
3212 return 0;
3213}
3214
c0c050c5
MC
3215static int bnxt_alloc_rx_rings(struct bnxt *bp)
3216{
4a228a3a 3217 int i, rc = 0, agg_rings = 0;
c0c050c5 3218
b6ab4b01
MC
3219 if (!bp->rx_ring)
3220 return -ENOMEM;
3221
c0c050c5
MC
3222 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3223 agg_rings = 1;
3224
c0c050c5 3225 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3226 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3227 struct bnxt_ring_struct *ring;
3228
c0c050c5
MC
3229 ring = &rxr->rx_ring_struct;
3230
322b87ca
AG
3231 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3232 if (rc)
3233 return rc;
3234
b02e5a0e 3235 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
12479f62 3236 if (rc < 0)
96a8604f
JDB
3237 return rc;
3238
f18c2b77 3239 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
3240 MEM_TYPE_PAGE_POOL,
3241 rxr->page_pool);
f18c2b77
AG
3242 if (rc) {
3243 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3244 return rc;
3245 }
3246
6fe19886 3247 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3248 if (rc)
3249 return rc;
3250
2c61d211 3251 ring->grp_idx = i;
c0c050c5
MC
3252 if (agg_rings) {
3253 u16 mem_size;
3254
3255 ring = &rxr->rx_agg_ring_struct;
6fe19886 3256 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3257 if (rc)
3258 return rc;
3259
9899bb59 3260 ring->grp_idx = i;
c0c050c5
MC
3261 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3262 mem_size = rxr->rx_agg_bmap_size / 8;
3263 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3264 if (!rxr->rx_agg_bmap)
3265 return -ENOMEM;
c0c050c5
MC
3266 }
3267 }
4a228a3a
MC
3268 if (bp->flags & BNXT_FLAG_TPA)
3269 rc = bnxt_alloc_tpa_info(bp);
3270 return rc;
c0c050c5
MC
3271}
3272
3273static void bnxt_free_tx_rings(struct bnxt *bp)
3274{
3275 int i;
3276 struct pci_dev *pdev = bp->pdev;
3277
b6ab4b01 3278 if (!bp->tx_ring)
c0c050c5
MC
3279 return;
3280
3281 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3282 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3283 struct bnxt_ring_struct *ring;
3284
c0c050c5
MC
3285 if (txr->tx_push) {
3286 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3287 txr->tx_push, txr->tx_push_mapping);
3288 txr->tx_push = NULL;
3289 }
3290
3291 ring = &txr->tx_ring_struct;
3292
6fe19886 3293 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3294 }
3295}
3296
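/* Allocate TX ring memory and, when TX push is enabled, a small
 * coherent buffer per ring that backs the push-mode doorbell writes.
 */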
3297static int bnxt_alloc_tx_rings(struct bnxt *bp)
3298{
3299 int i, j, rc;
3300 struct pci_dev *pdev = bp->pdev;
3301
3302 bp->tx_push_size = 0;
3303 if (bp->tx_push_thresh) {
3304 int push_size;
3305
3306 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3307 bp->tx_push_thresh);
3308
4419dbe6 3309 if (push_size > 256) {
c0c050c5
MC
3310 push_size = 0;
3311 bp->tx_push_thresh = 0;
3312 }
3313
3314 bp->tx_push_size = push_size;
3315 }
3316
3317 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3318 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 3319 struct bnxt_ring_struct *ring;
2e8ef77e 3320 u8 qidx;
c0c050c5 3321
c0c050c5
MC
3322 ring = &txr->tx_ring_struct;
3323
6fe19886 3324 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3325 if (rc)
3326 return rc;
3327
9899bb59 3328 ring->grp_idx = txr->bnapi->index;
c0c050c5 3329 if (bp->tx_push_size) {
c0c050c5
MC
3330 dma_addr_t mapping;
3331
3332 /* One pre-allocated DMA buffer to back up the
3333 * TX push operation
3334 */
3335 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3336 bp->tx_push_size,
3337 &txr->tx_push_mapping,
3338 GFP_KERNEL);
3339
3340 if (!txr->tx_push)
3341 return -ENOMEM;
3342
c0c050c5
MC
3343 mapping = txr->tx_push_mapping +
3344 sizeof(struct tx_push_bd);
4419dbe6 3345 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 3346 }
2e8ef77e
MC
3347 qidx = bp->tc_to_qidx[j];
3348 ring->queue_id = bp->q_info[qidx].queue_id;
4f81def2 3349 spin_lock_init(&txr->xdp_tx_lock);
5f449249
MC
3350 if (i < bp->tx_nr_rings_xdp)
3351 continue;
c0c050c5
MC
3352 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3353 j++;
3354 }
3355 return 0;
3356}
3357
03c74487
MC
3358static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3359{
985941e1
MC
3360 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3361
03c74487
MC
3362 kfree(cpr->cp_desc_ring);
3363 cpr->cp_desc_ring = NULL;
985941e1 3364 ring->ring_mem.pg_arr = NULL;
03c74487
MC
3365 kfree(cpr->cp_desc_mapping);
3366 cpr->cp_desc_mapping = NULL;
985941e1 3367 ring->ring_mem.dma_arr = NULL;
03c74487
MC
3368}
3369
3370static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3371{
3372 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3373 if (!cpr->cp_desc_ring)
3374 return -ENOMEM;
3375 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3376 GFP_KERNEL);
3377 if (!cpr->cp_desc_mapping)
3378 return -ENOMEM;
3379 return 0;
3380}
3381
3382static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3383{
3384 int i;
3385
3386 if (!bp->bnapi)
3387 return;
3388 for (i = 0; i < bp->cp_nr_rings; i++) {
3389 struct bnxt_napi *bnapi = bp->bnapi[i];
3390
3391 if (!bnapi)
3392 continue;
3393 bnxt_free_cp_arrays(&bnapi->cp_ring);
3394 }
3395}
3396
3397static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3398{
3399 int i, n = bp->cp_nr_pages;
3400
3401 for (i = 0; i < bp->cp_nr_rings; i++) {
3402 struct bnxt_napi *bnapi = bp->bnapi[i];
3403 int rc;
3404
3405 if (!bnapi)
3406 continue;
3407 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3408 if (rc)
3409 return rc;
3410 }
3411 return 0;
3412}
3413
c0c050c5
MC
3414static void bnxt_free_cp_rings(struct bnxt *bp)
3415{
3416 int i;
3417
3418 if (!bp->bnapi)
3419 return;
3420
3421 for (i = 0; i < bp->cp_nr_rings; i++) {
3422 struct bnxt_napi *bnapi = bp->bnapi[i];
3423 struct bnxt_cp_ring_info *cpr;
3424 struct bnxt_ring_struct *ring;
50e3ab78 3425 int j;
c0c050c5
MC
3426
3427 if (!bnapi)
3428 continue;
3429
3430 cpr = &bnapi->cp_ring;
3431 ring = &cpr->cp_ring_struct;
3432
6fe19886 3433 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
3434
3435 for (j = 0; j < 2; j++) {
3436 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3437
3438 if (cpr2) {
3439 ring = &cpr2->cp_ring_struct;
3440 bnxt_free_ring(bp, &ring->ring_mem);
03c74487 3441 bnxt_free_cp_arrays(cpr2);
50e3ab78
MC
3442 kfree(cpr2);
3443 cpr->cp_ring_arr[j] = NULL;
3444 }
3445 }
c0c050c5
MC
3446 }
3447}
3448
50e3ab78
MC
3449static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3450{
3451 struct bnxt_ring_mem_info *rmem;
3452 struct bnxt_ring_struct *ring;
3453 struct bnxt_cp_ring_info *cpr;
3454 int rc;
3455
3456 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3457 if (!cpr)
3458 return NULL;
3459
03c74487
MC
3460 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3461 if (rc) {
3462 bnxt_free_cp_arrays(cpr);
3463 kfree(cpr);
3464 return NULL;
3465 }
50e3ab78
MC
3466 ring = &cpr->cp_ring_struct;
3467 rmem = &ring->ring_mem;
3468 rmem->nr_pages = bp->cp_nr_pages;
3469 rmem->page_size = HW_CMPD_RING_SIZE;
3470 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3471 rmem->dma_arr = cpr->cp_desc_mapping;
3472 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3473 rc = bnxt_alloc_ring(bp, rmem);
3474 if (rc) {
3475 bnxt_free_ring(bp, rmem);
03c74487 3476 bnxt_free_cp_arrays(cpr);
50e3ab78
MC
3477 kfree(cpr);
3478 cpr = NULL;
3479 }
3480 return cpr;
3481}
3482
c0c050c5
MC
3483static int bnxt_alloc_cp_rings(struct bnxt *bp)
3484{
50e3ab78 3485 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 3486 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 3487
e5811b8c
MC
3488 ulp_msix = bnxt_get_ulp_msix_num(bp);
3489 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
3490 for (i = 0; i < bp->cp_nr_rings; i++) {
3491 struct bnxt_napi *bnapi = bp->bnapi[i];
3492 struct bnxt_cp_ring_info *cpr;
3493 struct bnxt_ring_struct *ring;
3494
3495 if (!bnapi)
3496 continue;
3497
3498 cpr = &bnapi->cp_ring;
50e3ab78 3499 cpr->bnapi = bnapi;
c0c050c5
MC
3500 ring = &cpr->cp_ring_struct;
3501
6fe19886 3502 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3503 if (rc)
3504 return rc;
e5811b8c
MC
3505
3506 if (ulp_msix && i >= ulp_base_vec)
3507 ring->map_idx = i + ulp_msix;
3508 else
3509 ring->map_idx = i;
50e3ab78
MC
3510
3511 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3512 continue;
3513
3514 if (i < bp->rx_nr_rings) {
3515 struct bnxt_cp_ring_info *cpr2 =
3516 bnxt_alloc_cp_sub_ring(bp);
3517
3518 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3519 if (!cpr2)
3520 return -ENOMEM;
3521 cpr2->bnapi = bnapi;
3522 }
3523 if ((sh && i < bp->tx_nr_rings) ||
3524 (!sh && i >= bp->rx_nr_rings)) {
3525 struct bnxt_cp_ring_info *cpr2 =
3526 bnxt_alloc_cp_sub_ring(bp);
3527
3528 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3529 if (!cpr2)
3530 return -ENOMEM;
3531 cpr2->bnapi = bnapi;
3532 }
c0c050c5
MC
3533 }
3534 return 0;
3535}
3536
3537static void bnxt_init_ring_struct(struct bnxt *bp)
3538{
3539 int i;
3540
3541 for (i = 0; i < bp->cp_nr_rings; i++) {
3542 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 3543 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
3544 struct bnxt_cp_ring_info *cpr;
3545 struct bnxt_rx_ring_info *rxr;
3546 struct bnxt_tx_ring_info *txr;
3547 struct bnxt_ring_struct *ring;
3548
3549 if (!bnapi)
3550 continue;
3551
3552 cpr = &bnapi->cp_ring;
3553 ring = &cpr->cp_ring_struct;
6fe19886
MC
3554 rmem = &ring->ring_mem;
3555 rmem->nr_pages = bp->cp_nr_pages;
3556 rmem->page_size = HW_CMPD_RING_SIZE;
3557 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3558 rmem->dma_arr = cpr->cp_desc_mapping;
3559 rmem->vmem_size = 0;
c0c050c5 3560
b6ab4b01 3561 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3562 if (!rxr)
3563 goto skip_rx;
3564
c0c050c5 3565 ring = &rxr->rx_ring_struct;
6fe19886
MC
3566 rmem = &ring->ring_mem;
3567 rmem->nr_pages = bp->rx_nr_pages;
3568 rmem->page_size = HW_RXBD_RING_SIZE;
3569 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3570 rmem->dma_arr = rxr->rx_desc_mapping;
3571 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3572 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
3573
3574 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
3575 rmem = &ring->ring_mem;
3576 rmem->nr_pages = bp->rx_agg_nr_pages;
3577 rmem->page_size = HW_RXBD_RING_SIZE;
3578 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3579 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3580 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3581 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 3582
3b2b7d9d 3583skip_rx:
b6ab4b01 3584 txr = bnapi->tx_ring;
3b2b7d9d
MC
3585 if (!txr)
3586 continue;
3587
c0c050c5 3588 ring = &txr->tx_ring_struct;
6fe19886
MC
3589 rmem = &ring->ring_mem;
3590 rmem->nr_pages = bp->tx_nr_pages;
3591 rmem->page_size = HW_RXBD_RING_SIZE;
3592 rmem->pg_arr = (void **)txr->tx_desc_ring;
3593 rmem->dma_arr = txr->tx_desc_mapping;
3594 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3595 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
3596 }
3597}
3598
3599static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3600{
3601 int i;
3602 u32 prod;
3603 struct rx_bd **rx_buf_ring;
3604
6fe19886
MC
3605 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3606 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
3607 int j;
3608 struct rx_bd *rxbd;
3609
3610 rxbd = rx_buf_ring[i];
3611 if (!rxbd)
3612 continue;
3613
3614 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3615 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3616 rxbd->rx_bd_opaque = prod;
3617 }
3618 }
3619}
3620
7737d325 3621static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
c0c050c5 3622{
7737d325 3623 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 3624 struct net_device *dev = bp->dev;
7737d325 3625 u32 prod;
c0c050c5
MC
3626 int i;
3627
c0c050c5
MC
3628 prod = rxr->rx_prod;
3629 for (i = 0; i < bp->rx_ring_size; i++) {
7737d325 3630 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3631 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3632 ring_nr, i, bp->rx_ring_size);
3633 break;
3634 }
3635 prod = NEXT_RX(prod);
3636 }
3637 rxr->rx_prod = prod;
edd0c2cc 3638
c0c050c5
MC
3639 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3640 return 0;
3641
c0c050c5
MC
3642 prod = rxr->rx_agg_prod;
3643 for (i = 0; i < bp->rx_agg_ring_size; i++) {
7737d325 3644 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3645 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3646 ring_nr, i, bp->rx_agg_ring_size);
3647 break;
3648 }
3649 prod = NEXT_RX_AGG(prod);
3650 }
3651 rxr->rx_agg_prod = prod;
c0c050c5 3652
7737d325
MC
3653 if (rxr->rx_tpa) {
3654 dma_addr_t mapping;
3655 u8 *data;
c0c050c5 3656
7737d325 3657 for (i = 0; i < bp->max_tpa; i++) {
720908e5 3658 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
7737d325
MC
3659 if (!data)
3660 return -ENOMEM;
c0c050c5 3661
7737d325
MC
3662 rxr->rx_tpa[i].data = data;
3663 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3664 rxr->rx_tpa[i].mapping = mapping;
c0c050c5
MC
3665 }
3666 }
c0c050c5
MC
3667 return 0;
3668}
3669
7737d325
MC
3670static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3671{
3672 struct bnxt_rx_ring_info *rxr;
3673 struct bnxt_ring_struct *ring;
3674 u32 type;
3675
3676 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3677 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3678
3679 if (NET_IP_ALIGN == 2)
3680 type |= RX_BD_FLAGS_SOP;
3681
3682 rxr = &bp->rx_ring[ring_nr];
3683 ring = &rxr->rx_ring_struct;
3684 bnxt_init_rxbd_pages(ring, type);
3685
3686 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3687 bpf_prog_add(bp->xdp_prog, 1);
3688 rxr->xdp_prog = bp->xdp_prog;
3689 }
3690 ring->fw_ring_id = INVALID_HW_RING_ID;
3691
3692 ring = &rxr->rx_agg_ring_struct;
3693 ring->fw_ring_id = INVALID_HW_RING_ID;
3694
3695 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3696 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3697 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3698
3699 bnxt_init_rxbd_pages(ring, type);
3700 }
3701
3702 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3703}
3704
2247925f
SP
3705static void bnxt_init_cp_rings(struct bnxt *bp)
3706{
3e08b184 3707 int i, j;
2247925f
SP
3708
3709 for (i = 0; i < bp->cp_nr_rings; i++) {
3710 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3711 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3712
3713 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
3714 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3715 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
3716 for (j = 0; j < 2; j++) {
3717 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3718
3719 if (!cpr2)
3720 continue;
3721
3722 ring = &cpr2->cp_ring_struct;
3723 ring->fw_ring_id = INVALID_HW_RING_ID;
3724 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3725 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3726 }
2247925f
SP
3727 }
3728}
3729
c0c050c5
MC
3730static int bnxt_init_rx_rings(struct bnxt *bp)
3731{
3732 int i, rc = 0;
3733
c61fb99c 3734 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
3735 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3736 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
3737 } else {
3738 bp->rx_offset = BNXT_RX_OFFSET;
3739 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3740 }
b3dba77c 3741
c0c050c5
MC
3742 for (i = 0; i < bp->rx_nr_rings; i++) {
3743 rc = bnxt_init_one_rx_ring(bp, i);
3744 if (rc)
3745 break;
3746 }
3747
3748 return rc;
3749}
3750
3751static int bnxt_init_tx_rings(struct bnxt *bp)
3752{
3753 u16 i;
3754
3755 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
5bed8b07 3756 BNXT_MIN_TX_DESC_CNT);
c0c050c5
MC
3757
3758 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3759 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3760 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3761
3762 ring->fw_ring_id = INVALID_HW_RING_ID;
3763 }
3764
3765 return 0;
3766}
3767
3768static void bnxt_free_ring_grps(struct bnxt *bp)
3769{
3770 kfree(bp->grp_info);
3771 bp->grp_info = NULL;
3772}
3773
3774static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3775{
3776 int i;
3777
3778 if (irq_re_init) {
3779 bp->grp_info = kcalloc(bp->cp_nr_rings,
3780 sizeof(struct bnxt_ring_grp_info),
3781 GFP_KERNEL);
3782 if (!bp->grp_info)
3783 return -ENOMEM;
3784 }
3785 for (i = 0; i < bp->cp_nr_rings; i++) {
3786 if (irq_re_init)
3787 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3788 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3789 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3790 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3791 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3792 }
3793 return 0;
3794}
3795
3796static void bnxt_free_vnics(struct bnxt *bp)
3797{
3798 kfree(bp->vnic_info);
3799 bp->vnic_info = NULL;
3800 bp->nr_vnics = 0;
3801}
3802
3803static int bnxt_alloc_vnics(struct bnxt *bp)
3804{
3805 int num_vnics = 1;
3806
3807#ifdef CONFIG_RFS_ACCEL
9b3d15e6 3808 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
c0c050c5
MC
3809 num_vnics += bp->rx_nr_rings;
3810#endif
3811
dc52c6c7
PS
3812 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3813 num_vnics++;
3814
c0c050c5
MC
3815 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3816 GFP_KERNEL);
3817 if (!bp->vnic_info)
3818 return -ENOMEM;
3819
3820 bp->nr_vnics = num_vnics;
3821 return 0;
3822}
3823
3824static void bnxt_init_vnics(struct bnxt *bp)
3825{
3826 int i;
3827
3828 for (i = 0; i < bp->nr_vnics; i++) {
3829 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3830 int j;
c0c050c5
MC
3831
3832 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3833 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3834 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3835
c0c050c5
MC
3836 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3837
3838 if (bp->vnic_info[i].rss_hash_key) {
3839 if (i == 0)
197173db 3840 get_random_bytes(vnic->rss_hash_key,
c0c050c5
MC
3841 HW_HASH_KEY_SIZE);
3842 else
3843 memcpy(vnic->rss_hash_key,
3844 bp->vnic_info[0].rss_hash_key,
3845 HW_HASH_KEY_SIZE);
3846 }
3847 }
3848}
3849
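/* Return the number of descriptor pages for a ring of ring_size entries.
 * The result always leaves at least one entry unused and is rounded up to
 * a power of two.
 */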
3850static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3851{
3852 int pages;
3853
3854 pages = ring_size / desc_per_pg;
3855
3856 if (!pages)
3857 return 1;
3858
3859 pages++;
3860
3861 while (pages & (pages - 1))
3862 pages++;
3863
3864 return pages;
3865}
3866
c6d30e83 3867void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3868{
3869 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3870 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3871 return;
c0c050c5
MC
3872 if (bp->dev->features & NETIF_F_LRO)
3873 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3874 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3875 bp->flags |= BNXT_FLAG_GRO;
3876}
3877
3878/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3879 * be set on entry.
3880 */
3881void bnxt_set_ring_params(struct bnxt *bp)
3882{
27640ce6 3883 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
c0c050c5
MC
3884 u32 agg_factor = 0, agg_ring_size = 0;
3885
3886 /* 8 for CRC and VLAN */
3887 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3888
32861236 3889 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
c0c050c5
MC
3890 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3891
3892 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3893 ring_size = bp->rx_ring_size;
3894 bp->rx_agg_ring_size = 0;
3895 bp->rx_agg_nr_pages = 0;
3896
3897 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3898 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3899
3900 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3901 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3902 u32 jumbo_factor;
3903
3904 bp->flags |= BNXT_FLAG_JUMBO;
3905 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3906 if (jumbo_factor > agg_factor)
3907 agg_factor = jumbo_factor;
3908 }
c1129b51
MC
3909 if (agg_factor) {
3910 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3911 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3912 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3913 bp->rx_ring_size, ring_size);
3914 bp->rx_ring_size = ring_size;
3915 }
3916 agg_ring_size = ring_size * agg_factor;
c0c050c5 3917
c0c050c5
MC
3918 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3919 RX_DESC_CNT);
3920 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3921 u32 tmp = agg_ring_size;
3922
3923 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3924 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3925 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3926 tmp, agg_ring_size);
3927 }
3928 bp->rx_agg_ring_size = agg_ring_size;
3929 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
32861236
AG
3930
3931 if (BNXT_RX_PAGE_MODE(bp)) {
1abeacc1
MC
3932 rx_space = PAGE_SIZE;
3933 rx_size = PAGE_SIZE -
3934 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
3935 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
32861236
AG
3936 } else {
3937 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3938 rx_space = rx_size + NET_SKB_PAD +
3939 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3940 }
c0c050c5
MC
3941 }
3942
3943 bp->rx_buf_use_size = rx_size;
3944 bp->rx_buf_size = rx_space;
3945
3946 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3947 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3948
3949 ring_size = bp->tx_ring_size;
3950 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3951 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3952
27640ce6
MC
3953 max_rx_cmpl = bp->rx_ring_size;
3954 /* MAX TPA needs to be added because TPA_START completions are
3955 * immediately recycled, so the TPA completions are not bound by
3956 * the RX ring size.
3957 */
3958 if (bp->flags & BNXT_FLAG_TPA)
3959 max_rx_cmpl += bp->max_tpa;
3960 /* RX and TPA completions are 32-byte, all others are 16-byte */
3961 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
c0c050c5
MC
3962 bp->cp_ring_size = ring_size;
3963
3964 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3965 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3966 bp->cp_nr_pages = MAX_CP_PAGES;
3967 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3968 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3969 ring_size, bp->cp_ring_size);
3970 }
3971 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3972 bp->cp_ring_mask = bp->cp_bit - 1;
3973}
3974
96a8604f
JDB
3975/* Changing allocation mode of RX rings.
3976 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3977 */
c61fb99c 3978int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3979{
08450ea9
MC
3980 struct net_device *dev = bp->dev;
3981
c61fb99c 3982 if (page_mode) {
c61fb99c 3983 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
1dc4c557
AG
3984 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
3985
08450ea9
MC
3986 if (bp->xdp_prog->aux->xdp_has_frags)
3987 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
3988 else
3989 dev->max_mtu =
3990 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3991 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
1dc4c557
AG
3992 bp->flags |= BNXT_FLAG_JUMBO;
3993 bp->rx_skb_func = bnxt_rx_multi_page_skb;
1dc4c557
AG
3994 } else {
3995 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
3996 bp->rx_skb_func = bnxt_rx_page_skb;
1dc4c557 3997 }
c61fb99c 3998 bp->rx_dir = DMA_BIDIRECTIONAL;
1054aee8 3999 /* Disable LRO or GRO_HW */
08450ea9 4000 netdev_update_features(dev);
c61fb99c 4001 } else {
08450ea9 4002 dev->max_mtu = bp->max_mtu;
c61fb99c
MC
4003 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4004 bp->rx_dir = DMA_FROM_DEVICE;
4005 bp->rx_skb_func = bnxt_rx_skb;
4006 }
6bb19474
MC
4007 return 0;
4008}
4009
c0c050c5
MC
4010static void bnxt_free_vnic_attributes(struct bnxt *bp)
4011{
4012 int i;
4013 struct bnxt_vnic_info *vnic;
4014 struct pci_dev *pdev = bp->pdev;
4015
4016 if (!bp->vnic_info)
4017 return;
4018
4019 for (i = 0; i < bp->nr_vnics; i++) {
4020 vnic = &bp->vnic_info[i];
4021
4022 kfree(vnic->fw_grp_ids);
4023 vnic->fw_grp_ids = NULL;
4024
4025 kfree(vnic->uc_list);
4026 vnic->uc_list = NULL;
4027
4028 if (vnic->mc_list) {
4029 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4030 vnic->mc_list, vnic->mc_list_mapping);
4031 vnic->mc_list = NULL;
4032 }
4033
4034 if (vnic->rss_table) {
34370d24 4035 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
c0c050c5
MC
4036 vnic->rss_table,
4037 vnic->rss_table_dma_addr);
4038 vnic->rss_table = NULL;
4039 }
4040
4041 vnic->rss_hash_key = NULL;
4042 vnic->flags = 0;
4043 }
4044}
4045
4046static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4047{
4048 int i, rc = 0, size;
4049 struct bnxt_vnic_info *vnic;
4050 struct pci_dev *pdev = bp->pdev;
4051 int max_rings;
4052
4053 for (i = 0; i < bp->nr_vnics; i++) {
4054 vnic = &bp->vnic_info[i];
4055
4056 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4057 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4058
4059 if (mem_size > 0) {
4060 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4061 if (!vnic->uc_list) {
4062 rc = -ENOMEM;
4063 goto out;
4064 }
4065 }
4066 }
4067
4068 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4069 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4070 vnic->mc_list =
4071 dma_alloc_coherent(&pdev->dev,
4072 vnic->mc_list_size,
4073 &vnic->mc_list_mapping,
4074 GFP_KERNEL);
4075 if (!vnic->mc_list) {
4076 rc = -ENOMEM;
4077 goto out;
4078 }
4079 }
4080
44c6f72a
MC
4081 if (bp->flags & BNXT_FLAG_CHIP_P5)
4082 goto vnic_skip_grps;
4083
c0c050c5
MC
4084 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4085 max_rings = bp->rx_nr_rings;
4086 else
4087 max_rings = 1;
4088
4089 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4090 if (!vnic->fw_grp_ids) {
4091 rc = -ENOMEM;
4092 goto out;
4093 }
44c6f72a 4094vnic_skip_grps:
ae10ae74
MC
4095 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
4096 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4097 continue;
4098
c0c050c5 4099 /* Allocate rss table and hash key */
34370d24
MC
4100 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4101 if (bp->flags & BNXT_FLAG_CHIP_P5)
4102 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4103
4104 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4105 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4106 vnic->rss_table_size,
c0c050c5
MC
4107 &vnic->rss_table_dma_addr,
4108 GFP_KERNEL);
4109 if (!vnic->rss_table) {
4110 rc = -ENOMEM;
4111 goto out;
4112 }
4113
c0c050c5
MC
4114 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4115 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4116 }
4117 return 0;
4118
4119out:
4120 return rc;
4121}
4122
4123static void bnxt_free_hwrm_resources(struct bnxt *bp)
4124{
68f684e2
EP
4125 struct bnxt_hwrm_wait_token *token;
4126
f9ff5782
EP
4127 dma_pool_destroy(bp->hwrm_dma_pool);
4128 bp->hwrm_dma_pool = NULL;
68f684e2
EP
4129
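/* Mark any HWRM requests still waiting for a firmware response as
 * cancelled so their waiters give up.
 */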
4130 rcu_read_lock();
4131 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4132 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4133 rcu_read_unlock();
c0c050c5
MC
4134}
4135
4136static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4137{
b34695a8 4138 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
f9ff5782
EP
4139 BNXT_HWRM_DMA_SIZE,
4140 BNXT_HWRM_DMA_ALIGN, 0);
4141 if (!bp->hwrm_dma_pool)
e605db80
DK
4142 return -ENOMEM;
4143
68f684e2
EP
4144 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4145
e605db80
DK
4146 return 0;
4147}
4148
177a6cde 4149static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
c0c050c5 4150{
a37120b2
MC
4151 kfree(stats->hw_masks);
4152 stats->hw_masks = NULL;
4153 kfree(stats->sw_stats);
4154 stats->sw_stats = NULL;
177a6cde
MC
4155 if (stats->hw_stats) {
4156 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4157 stats->hw_stats_map);
4158 stats->hw_stats = NULL;
4159 }
4160}
c0c050c5 4161
a37120b2
MC
4162static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4163 bool alloc_masks)
177a6cde
MC
4164{
4165 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4166 &stats->hw_stats_map, GFP_KERNEL);
4167 if (!stats->hw_stats)
4168 return -ENOMEM;
00db3cba 4169
a37120b2
MC
4170 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4171 if (!stats->sw_stats)
4172 goto stats_mem_err;
4173
4174 if (alloc_masks) {
4175 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4176 if (!stats->hw_masks)
4177 goto stats_mem_err;
4178 }
177a6cde 4179 return 0;
a37120b2
MC
4180
4181stats_mem_err:
4182 bnxt_free_stats_mem(bp, stats);
4183 return -ENOMEM;
177a6cde 4184}
00db3cba 4185
d752d053
MC
4186static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4187{
4188 int i;
4189
4190 for (i = 0; i < count; i++)
4191 mask_arr[i] = mask;
4192}
4193
4194static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4195{
4196 int i;
4197
4198 for (i = 0; i < count; i++)
4199 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4200}
4201
4202static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4203 struct bnxt_stats_mem *stats)
4204{
bbf33d1d
EP
4205 struct hwrm_func_qstats_ext_output *resp;
4206 struct hwrm_func_qstats_ext_input *req;
d752d053
MC
4207 __le64 *hw_masks;
4208 int rc;
4209
4210 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4211 !(bp->flags & BNXT_FLAG_CHIP_P5))
4212 return -EOPNOTSUPP;
4213
bbf33d1d 4214 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
d752d053 4215 if (rc)
bbf33d1d 4216 return rc;
d752d053 4217
bbf33d1d
EP
4218 req->fid = cpu_to_le16(0xffff);
4219 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
d752d053 4220
bbf33d1d
EP
4221 resp = hwrm_req_hold(bp, req);
4222 rc = hwrm_req_send(bp, req);
4223 if (!rc) {
4224 hw_masks = &resp->rx_ucast_pkts;
4225 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4226 }
4227 hwrm_req_drop(bp, req);
d752d053
MC
4228 return rc;
4229}
4230
531d1d26
MC
4231static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4232static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4233
d752d053
MC
4234static void bnxt_init_stats(struct bnxt *bp)
4235{
4236 struct bnxt_napi *bnapi = bp->bnapi[0];
4237 struct bnxt_cp_ring_info *cpr;
4238 struct bnxt_stats_mem *stats;
531d1d26
MC
4239 __le64 *rx_stats, *tx_stats;
4240 int rc, rx_count, tx_count;
4241 u64 *rx_masks, *tx_masks;
d752d053 4242 u64 mask;
531d1d26 4243 u8 flags;
d752d053
MC
4244
4245 cpr = &bnapi->cp_ring;
4246 stats = &cpr->stats;
4247 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
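/* If the firmware cannot report counter widths, assume 48-bit counters
 * on P5 chips and full 64-bit counters otherwise.
 */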
4248 if (rc) {
4249 if (bp->flags & BNXT_FLAG_CHIP_P5)
4250 mask = (1ULL << 48) - 1;
4251 else
4252 mask = -1ULL;
4253 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4254 }
531d1d26
MC
4255 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4256 stats = &bp->port_stats;
4257 rx_stats = stats->hw_stats;
4258 rx_masks = stats->hw_masks;
4259 rx_count = sizeof(struct rx_port_stats) / 8;
4260 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4261 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4262 tx_count = sizeof(struct tx_port_stats) / 8;
4263
4264 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4265 rc = bnxt_hwrm_port_qstats(bp, flags);
4266 if (rc) {
4267 mask = (1ULL << 40) - 1;
4268
4269 bnxt_fill_masks(rx_masks, mask, rx_count);
4270 bnxt_fill_masks(tx_masks, mask, tx_count);
4271 } else {
4272 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4273 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4274 bnxt_hwrm_port_qstats(bp, 0);
4275 }
4276 }
4277 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4278 stats = &bp->rx_port_stats_ext;
4279 rx_stats = stats->hw_stats;
4280 rx_masks = stats->hw_masks;
4281 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4282 stats = &bp->tx_port_stats_ext;
4283 tx_stats = stats->hw_stats;
4284 tx_masks = stats->hw_masks;
4285 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4286
c07fa08f 4287 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
531d1d26
MC
4288 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4289 if (rc) {
4290 mask = (1ULL << 40) - 1;
4291
4292 bnxt_fill_masks(rx_masks, mask, rx_count);
4293 if (tx_stats)
4294 bnxt_fill_masks(tx_masks, mask, tx_count);
4295 } else {
4296 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4297 if (tx_stats)
4298 bnxt_copy_hw_masks(tx_masks, tx_stats,
4299 tx_count);
4300 bnxt_hwrm_port_qstats_ext(bp, 0);
4301 }
4302 }
d752d053
MC
4303}
4304
177a6cde
MC
4305static void bnxt_free_port_stats(struct bnxt *bp)
4306{
4307 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4308 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
36e53349 4309
177a6cde
MC
4310 bnxt_free_stats_mem(bp, &bp->port_stats);
4311 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4312 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
fd3ab1c7
MC
4313}
4314
4315static void bnxt_free_ring_stats(struct bnxt *bp)
4316{
177a6cde 4317 int i;
3bdf56c4 4318
c0c050c5
MC
4319 if (!bp->bnapi)
4320 return;
4321
c0c050c5
MC
4322 for (i = 0; i < bp->cp_nr_rings; i++) {
4323 struct bnxt_napi *bnapi = bp->bnapi[i];
4324 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4325
177a6cde 4326 bnxt_free_stats_mem(bp, &cpr->stats);
c0c050c5
MC
4327 }
4328}
4329
4330static int bnxt_alloc_stats(struct bnxt *bp)
4331{
4332 u32 size, i;
177a6cde 4333 int rc;
c0c050c5 4334
4e748506 4335 size = bp->hw_ring_stats_size;
c0c050c5
MC
4336
4337 for (i = 0; i < bp->cp_nr_rings; i++) {
4338 struct bnxt_napi *bnapi = bp->bnapi[i];
4339 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4340
177a6cde 4341 cpr->stats.len = size;
a37120b2 4342 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
177a6cde
MC
4343 if (rc)
4344 return rc;
c0c050c5
MC
4345
4346 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4347 }
3bdf56c4 4348
a220eabc
VV
4349 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4350 return 0;
fd3ab1c7 4351
177a6cde 4352 if (bp->port_stats.hw_stats)
a220eabc 4353 goto alloc_ext_stats;
3bdf56c4 4354
177a6cde 4355 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
a37120b2 4356 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
177a6cde
MC
4357 if (rc)
4358 return rc;
3bdf56c4 4359
a220eabc 4360 bp->flags |= BNXT_FLAG_PORT_STATS;
00db3cba 4361
fd3ab1c7 4362alloc_ext_stats:
a220eabc
VV
4363 /* Display extended statistics only if FW supports them */
4364 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 4365 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
4366 return 0;
4367
177a6cde 4368 if (bp->rx_port_stats_ext.hw_stats)
a220eabc 4369 goto alloc_tx_ext_stats;
fd3ab1c7 4370
177a6cde 4371 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
a37120b2 4372 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
177a6cde
MC
4373 /* Extended stats are optional */
4374 if (rc)
a220eabc 4375 return 0;
00db3cba 4376
fd3ab1c7 4377alloc_tx_ext_stats:
177a6cde 4378 if (bp->tx_port_stats_ext.hw_stats)
dfe64de9 4379 return 0;
fd3ab1c7 4380
6154532f
VV
4381 if (bp->hwrm_spec_code >= 0x10902 ||
4382 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
177a6cde 4383 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
a37120b2 4384 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
177a6cde
MC
4385 /* Extended stats are optional */
4386 if (rc)
4387 return 0;
3bdf56c4 4388 }
a220eabc 4389 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
c0c050c5
MC
4390 return 0;
4391}
4392
4393static void bnxt_clear_ring_indices(struct bnxt *bp)
4394{
4395 int i;
4396
4397 if (!bp->bnapi)
4398 return;
4399
4400 for (i = 0; i < bp->cp_nr_rings; i++) {
4401 struct bnxt_napi *bnapi = bp->bnapi[i];
4402 struct bnxt_cp_ring_info *cpr;
4403 struct bnxt_rx_ring_info *rxr;
4404 struct bnxt_tx_ring_info *txr;
4405
4406 if (!bnapi)
4407 continue;
4408
4409 cpr = &bnapi->cp_ring;
4410 cpr->cp_raw_cons = 0;
4411
b6ab4b01 4412 txr = bnapi->tx_ring;
3b2b7d9d
MC
4413 if (txr) {
4414 txr->tx_prod = 0;
4415 txr->tx_cons = 0;
4416 }
c0c050c5 4417
b6ab4b01 4418 rxr = bnapi->rx_ring;
3b2b7d9d
MC
4419 if (rxr) {
4420 rxr->rx_prod = 0;
4421 rxr->rx_agg_prod = 0;
4422 rxr->rx_sw_agg_prod = 0;
376a5b86 4423 rxr->rx_next_cons = 0;
3b2b7d9d 4424 }
c0c050c5
MC
4425 }
4426}
4427
4428static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4429{
4430#ifdef CONFIG_RFS_ACCEL
4431 int i;
4432
4433 /* We are under rtnl_lock and all our NAPIs have been disabled, so it's
4434 * safe to delete the hash table.
4435 */
4436 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4437 struct hlist_head *head;
4438 struct hlist_node *tmp;
4439 struct bnxt_ntuple_filter *fltr;
4440
4441 head = &bp->ntp_fltr_hash_tbl[i];
4442 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4443 hlist_del(&fltr->hash);
4444 kfree(fltr);
4445 }
4446 }
4447 if (irq_reinit) {
45262522 4448 bitmap_free(bp->ntp_fltr_bmap);
c0c050c5
MC
4449 bp->ntp_fltr_bmap = NULL;
4450 }
4451 bp->ntp_fltr_count = 0;
4452#endif
4453}
4454
4455static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4456{
4457#ifdef CONFIG_RFS_ACCEL
4458 int i, rc = 0;
4459
4460 if (!(bp->flags & BNXT_FLAG_RFS))
4461 return 0;
4462
4463 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4464 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4465
4466 bp->ntp_fltr_count = 0;
45262522 4467 bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
c0c050c5
MC
4468
4469 if (!bp->ntp_fltr_bmap)
4470 rc = -ENOMEM;
4471
4472 return rc;
4473#else
4474 return 0;
4475#endif
4476}
4477
4478static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4479{
4480 bnxt_free_vnic_attributes(bp);
4481 bnxt_free_tx_rings(bp);
4482 bnxt_free_rx_rings(bp);
4483 bnxt_free_cp_rings(bp);
03c74487 4484 bnxt_free_all_cp_arrays(bp);
c0c050c5
MC
4485 bnxt_free_ntp_fltrs(bp, irq_re_init);
4486 if (irq_re_init) {
fd3ab1c7 4487 bnxt_free_ring_stats(bp);
b0d28207 4488 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
eba93de6 4489 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
fea6b333 4490 bnxt_free_port_stats(bp);
c0c050c5
MC
4491 bnxt_free_ring_grps(bp);
4492 bnxt_free_vnics(bp);
a960dec9
MC
4493 kfree(bp->tx_ring_map);
4494 bp->tx_ring_map = NULL;
b6ab4b01
MC
4495 kfree(bp->tx_ring);
4496 bp->tx_ring = NULL;
4497 kfree(bp->rx_ring);
4498 bp->rx_ring = NULL;
c0c050c5
MC
4499 kfree(bp->bnapi);
4500 bp->bnapi = NULL;
4501 } else {
4502 bnxt_clear_ring_indices(bp);
4503 }
4504}
4505
4506static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4507{
01657bcd 4508 int i, j, rc, size, arr_size;
c0c050c5
MC
4509 void *bnapi;
4510
4511 if (irq_re_init) {
4512 /* Allocate bnapi mem pointer array and mem block for
4513 * all queues
4514 */
4515 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4516 bp->cp_nr_rings);
4517 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4518 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4519 if (!bnapi)
4520 return -ENOMEM;
4521
4522 bp->bnapi = bnapi;
4523 bnapi += arr_size;
4524 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4525 bp->bnapi[i] = bnapi;
4526 bp->bnapi[i]->index = i;
4527 bp->bnapi[i]->bp = bp;
e38287b7
MC
4528 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4529 struct bnxt_cp_ring_info *cpr =
4530 &bp->bnapi[i]->cp_ring;
4531
4532 cpr->cp_ring_struct.ring_mem.flags =
4533 BNXT_RMEM_RING_PTE_FLAG;
4534 }
c0c050c5
MC
4535 }
4536
b6ab4b01
MC
4537 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4538 sizeof(struct bnxt_rx_ring_info),
4539 GFP_KERNEL);
4540 if (!bp->rx_ring)
4541 return -ENOMEM;
4542
4543 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
4544 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4545
4546 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4547 rxr->rx_ring_struct.ring_mem.flags =
4548 BNXT_RMEM_RING_PTE_FLAG;
4549 rxr->rx_agg_ring_struct.ring_mem.flags =
4550 BNXT_RMEM_RING_PTE_FLAG;
4551 }
4552 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
4553 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4554 }
4555
4556 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4557 sizeof(struct bnxt_tx_ring_info),
4558 GFP_KERNEL);
4559 if (!bp->tx_ring)
4560 return -ENOMEM;
4561
a960dec9
MC
4562 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4563 GFP_KERNEL);
4564
4565 if (!bp->tx_ring_map)
4566 return -ENOMEM;
4567
01657bcd
MC
4568 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4569 j = 0;
4570 else
4571 j = bp->rx_nr_rings;
4572
4573 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
4574 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4575
4576 if (bp->flags & BNXT_FLAG_CHIP_P5)
4577 txr->tx_ring_struct.ring_mem.flags =
4578 BNXT_RMEM_RING_PTE_FLAG;
4579 txr->bnapi = bp->bnapi[j];
4580 bp->bnapi[j]->tx_ring = txr;
5f449249 4581 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 4582 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 4583 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
4584 bp->bnapi[j]->tx_int = bnxt_tx_int;
4585 } else {
fa3e93e8 4586 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
4587 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4588 }
b6ab4b01
MC
4589 }
4590
c0c050c5
MC
4591 rc = bnxt_alloc_stats(bp);
4592 if (rc)
4593 goto alloc_mem_err;
d752d053 4594 bnxt_init_stats(bp);
c0c050c5
MC
4595
4596 rc = bnxt_alloc_ntp_fltrs(bp);
4597 if (rc)
4598 goto alloc_mem_err;
4599
4600 rc = bnxt_alloc_vnics(bp);
4601 if (rc)
4602 goto alloc_mem_err;
4603 }
4604
03c74487
MC
4605 rc = bnxt_alloc_all_cp_arrays(bp);
4606 if (rc)
4607 goto alloc_mem_err;
4608
c0c050c5
MC
4609 bnxt_init_ring_struct(bp);
4610
4611 rc = bnxt_alloc_rx_rings(bp);
4612 if (rc)
4613 goto alloc_mem_err;
4614
4615 rc = bnxt_alloc_tx_rings(bp);
4616 if (rc)
4617 goto alloc_mem_err;
4618
4619 rc = bnxt_alloc_cp_rings(bp);
4620 if (rc)
4621 goto alloc_mem_err;
4622
4623 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4624 BNXT_VNIC_UCAST_FLAG;
4625 rc = bnxt_alloc_vnic_attributes(bp);
4626 if (rc)
4627 goto alloc_mem_err;
4628 return 0;
4629
4630alloc_mem_err:
4631 bnxt_free_mem(bp, true);
4632 return rc;
4633}
4634
9d8bc097
MC
4635static void bnxt_disable_int(struct bnxt *bp)
4636{
4637 int i;
4638
4639 if (!bp->bnapi)
4640 return;
4641
4642 for (i = 0; i < bp->cp_nr_rings; i++) {
4643 struct bnxt_napi *bnapi = bp->bnapi[i];
4644 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 4645 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 4646
daf1f1e7 4647 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 4648 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4649 }
4650}
4651
e5811b8c
MC
4652static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4653{
4654 struct bnxt_napi *bnapi = bp->bnapi[n];
4655 struct bnxt_cp_ring_info *cpr;
4656
4657 cpr = &bnapi->cp_ring;
4658 return cpr->cp_ring_struct.map_idx;
4659}
4660
9d8bc097
MC
4661static void bnxt_disable_int_sync(struct bnxt *bp)
4662{
4663 int i;
4664
38290e37
MC
4665 if (!bp->irq_tbl)
4666 return;
4667
9d8bc097
MC
4668 atomic_inc(&bp->intr_sem);
4669
4670 bnxt_disable_int(bp);
e5811b8c
MC
4671 for (i = 0; i < bp->cp_nr_rings; i++) {
4672 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4673
4674 synchronize_irq(bp->irq_tbl[map_idx].vector);
4675 }
9d8bc097
MC
4676}
4677
4678static void bnxt_enable_int(struct bnxt *bp)
4679{
4680 int i;
4681
4682 atomic_set(&bp->intr_sem, 0);
4683 for (i = 0; i < bp->cp_nr_rings; i++) {
4684 struct bnxt_napi *bnapi = bp->bnapi[i];
4685 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4686
697197e5 4687 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4688 }
4689}
4690
2e882468
VV
4691int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4692 bool async_only)
c0c050c5 4693{
25be8623
MC
4694 DECLARE_BITMAP(async_events_bmap, 256);
4695 u32 *events = (u32 *)async_events_bmap;
bbf33d1d
EP
4696 struct hwrm_func_drv_rgtr_output *resp;
4697 struct hwrm_func_drv_rgtr_input *req;
acfb50e4 4698 u32 flags;
2e882468 4699 int rc, i;
a1653b13 4700
bbf33d1d
EP
4701 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4702 if (rc)
4703 return rc;
a1653b13 4704
bbf33d1d
EP
4705 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4706 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4707 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
a1653b13 4708
bbf33d1d 4709 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
8280b38e
VV
4710 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4711 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4712 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
acfb50e4 4713 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
e633a329
VV
4714 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4715 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
bbf33d1d
EP
4716 req->flags = cpu_to_le32(flags);
4717 req->ver_maj_8b = DRV_VER_MAJ;
4718 req->ver_min_8b = DRV_VER_MIN;
4719 req->ver_upd_8b = DRV_VER_UPD;
4720 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4721 req->ver_min = cpu_to_le16(DRV_VER_MIN);
4722 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4723
4724 if (BNXT_PF(bp)) {
9b0436c3 4725 u32 data[8];
a1653b13 4726 int i;
c0c050c5 4727
9b0436c3
MC
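/* Build a bitmap, one bit per HWRM command ID, of the VF requests that
 * firmware should forward to the PF driver.
 */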
4728 memset(data, 0, sizeof(data));
4729 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4730 u16 cmd = bnxt_vf_req_snif[i];
4731 unsigned int bit, idx;
4732
4733 idx = cmd / 32;
4734 bit = cmd % 32;
4735 data[idx] |= 1 << bit;
4736 }
c0c050c5 4737
de68f5de 4738 for (i = 0; i < 8; i++)
bbf33d1d 4739 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
de68f5de 4740
bbf33d1d 4741 req->enables |=
c0c050c5
MC
4742 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4743 }
4744
abd43a13 4745 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
bbf33d1d 4746 req->flags |= cpu_to_le32(
abd43a13
VD
4747 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4748
2e882468
VV
4749 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4750 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4751 u16 event_id = bnxt_async_events_arr[i];
4752
4753 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4754 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4755 continue;
319a7827
PC
4756 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
4757 !bp->ptp_cfg)
4758 continue;
2e882468
VV
4759 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4760 }
4761 if (bmap && bmap_size) {
4762 for (i = 0; i < bmap_size; i++) {
4763 if (test_bit(i, bmap))
4764 __set_bit(i, async_events_bmap);
4765 }
4766 }
4767 for (i = 0; i < 8; i++)
bbf33d1d 4768 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
2e882468
VV
4769
4770 if (async_only)
bbf33d1d 4771 req->enables =
2e882468
VV
4772 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4773
bbf33d1d
EP
4774 resp = hwrm_req_hold(bp, req);
4775 rc = hwrm_req_send(bp, req);
bdb38602
VV
4776 if (!rc) {
4777 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4778 if (resp->flags &
4779 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4780 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4781 }
bbf33d1d 4782 hwrm_req_drop(bp, req);
25e1acd6 4783 return rc;
c0c050c5
MC
4784}
4785
228ea8c1 4786int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
be58a0da 4787{
bbf33d1d
EP
4788 struct hwrm_func_drv_unrgtr_input *req;
4789 int rc;
be58a0da 4790
bdb38602
VV
4791 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4792 return 0;
4793
bbf33d1d
EP
4794 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4795 if (rc)
4796 return rc;
4797 return hwrm_req_send(bp, req);
be58a0da
JH
4798}
4799
c0c050c5
MC
4800static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4801{
bbf33d1d
EP
4802 struct hwrm_tunnel_dst_port_free_input *req;
4803 int rc;
c0c050c5 4804
7ae9dc35
MC
4805 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4806 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4807 return 0;
4808 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4809 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4810 return 0;
4811
bbf33d1d
EP
4812 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4813 if (rc)
4814 return rc;
4815
4816 req->tunnel_type = tunnel_type;
c0c050c5
MC
4817
4818 switch (tunnel_type) {
4819 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
bbf33d1d 4820 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
7ae9dc35 4821 bp->vxlan_port = 0;
442a35a5 4822 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4823 break;
4824 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
bbf33d1d 4825 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
7ae9dc35 4826 bp->nge_port = 0;
442a35a5 4827 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4828 break;
4829 default:
4830 break;
4831 }
4832
bbf33d1d 4833 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4834 if (rc)
4835 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4836 rc);
4837 return rc;
4838}
4839
4840static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4841 u8 tunnel_type)
4842{
bbf33d1d
EP
4843 struct hwrm_tunnel_dst_port_alloc_output *resp;
4844 struct hwrm_tunnel_dst_port_alloc_input *req;
4845 int rc;
c0c050c5 4846
bbf33d1d
EP
4847 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4848 if (rc)
4849 return rc;
c0c050c5 4850
bbf33d1d
EP
4851 req->tunnel_type = tunnel_type;
4852 req->tunnel_dst_port_val = port;
c0c050c5 4853
bbf33d1d
EP
4854 resp = hwrm_req_hold(bp, req);
4855 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4856 if (rc) {
4857 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4858 rc);
4859 goto err_out;
4860 }
4861
57aac71b
CJ
4862 switch (tunnel_type) {
4863 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
7ae9dc35 4864 bp->vxlan_port = port;
442a35a5
JK
4865 bp->vxlan_fw_dst_port_id =
4866 le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4867 break;
4868 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
7ae9dc35 4869 bp->nge_port = port;
442a35a5 4870 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4871 break;
4872 default:
4873 break;
4874 }
4875
c0c050c5 4876err_out:
bbf33d1d 4877 hwrm_req_drop(bp, req);
c0c050c5
MC
4878 return rc;
4879}
4880
4881static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4882{
bbf33d1d 4883 struct hwrm_cfa_l2_set_rx_mask_input *req;
c0c050c5 4884 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d 4885 int rc;
c0c050c5 4886
bbf33d1d
EP
4887 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4888 if (rc)
4889 return rc;
c0c050c5 4890
bbf33d1d 4891 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
8cdb1592
PC
4892 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4893 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4894 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4895 }
bbf33d1d
EP
4896 req->mask = cpu_to_le32(vnic->rx_mask);
4897 return hwrm_req_send_silent(bp, req);
c0c050c5
MC
4898}
4899
4900#ifdef CONFIG_RFS_ACCEL
4901static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4902 struct bnxt_ntuple_filter *fltr)
4903{
bbf33d1d
EP
4904 struct hwrm_cfa_ntuple_filter_free_input *req;
4905 int rc;
c0c050c5 4906
bbf33d1d
EP
4907 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4908 if (rc)
4909 return rc;
4910
4911 req->ntuple_filter_id = fltr->filter_id;
4912 return hwrm_req_send(bp, req);
c0c050c5
MC
4913}
4914
4915#define BNXT_NTP_FLTR_FLAGS \
4916 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4917 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4918 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4919 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4920 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4921 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4922 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4923 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4924 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4925 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4926 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4927 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4928 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4929 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4930
61aad724
MC
4931#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4932 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4933
c0c050c5
MC
4934static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4935 struct bnxt_ntuple_filter *fltr)
4936{
5c209fc8 4937 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
bbf33d1d 4938 struct hwrm_cfa_ntuple_filter_alloc_input *req;
c0c050c5 4939 struct flow_keys *keys = &fltr->fkeys;
ac33906c 4940 struct bnxt_vnic_info *vnic;
41136ab3 4941 u32 flags = 0;
bbf33d1d 4942 int rc;
c0c050c5 4943
bbf33d1d
EP
4944 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4945 if (rc)
4946 return rc;
4947
4948 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4949
41136ab3
MC
4950 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4951 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
bbf33d1d 4952 req->dst_id = cpu_to_le16(fltr->rxq);
ac33906c
MC
4953 } else {
4954 vnic = &bp->vnic_info[fltr->rxq + 1];
bbf33d1d 4955 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
ac33906c 4956 }
bbf33d1d
EP
4957 req->flags = cpu_to_le32(flags);
4958 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
c0c050c5 4959
bbf33d1d
EP
4960 req->ethertype = htons(ETH_P_IP);
4961 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4962 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4963 req->ip_protocol = keys->basic.ip_proto;
c0c050c5 4964
dda0e746
MC
4965 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4966 int i;
4967
bbf33d1d
EP
4968 req->ethertype = htons(ETH_P_IPV6);
4969 req->ip_addr_type =
dda0e746 4970 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
bbf33d1d 4971 *(struct in6_addr *)&req->src_ipaddr[0] =
dda0e746 4972 keys->addrs.v6addrs.src;
bbf33d1d 4973 *(struct in6_addr *)&req->dst_ipaddr[0] =
dda0e746
MC
4974 keys->addrs.v6addrs.dst;
4975 for (i = 0; i < 4; i++) {
bbf33d1d
EP
4976 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4977 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
dda0e746
MC
4978 }
4979 } else {
bbf33d1d
EP
4980 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4981 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4982 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4983 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
dda0e746 4984 }
61aad724 4985 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
bbf33d1d
EP
4986 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4987 req->tunnel_type =
61aad724
MC
4988 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4989 }
c0c050c5 4990
bbf33d1d
EP
4991 req->src_port = keys->ports.src;
4992 req->src_port_mask = cpu_to_be16(0xffff);
4993 req->dst_port = keys->ports.dst;
4994 req->dst_port_mask = cpu_to_be16(0xffff);
c0c050c5 4995
bbf33d1d
EP
4996 resp = hwrm_req_hold(bp, req);
4997 rc = hwrm_req_send(bp, req);
4998 if (!rc)
c0c050c5 4999 fltr->filter_id = resp->ntuple_filter_id;
bbf33d1d 5000 hwrm_req_drop(bp, req);
c0c050c5
MC
5001 return rc;
5002}
5003#endif
5004
5005static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
76660757 5006 const u8 *mac_addr)
c0c050c5 5007{
bbf33d1d
EP
5008 struct hwrm_cfa_l2_filter_alloc_output *resp;
5009 struct hwrm_cfa_l2_filter_alloc_input *req;
5010 int rc;
c0c050c5 5011
bbf33d1d
EP
5012 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5013 if (rc)
5014 return rc;
5015
5016 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
dc52c6c7 5017 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
bbf33d1d 5018 req->flags |=
dc52c6c7 5019 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
bbf33d1d
EP
5020 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5021 req->enables =
c0c050c5 5022 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 5023 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5 5024 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
bbf33d1d
EP
5025 memcpy(req->l2_addr, mac_addr, ETH_ALEN);
5026 req->l2_addr_mask[0] = 0xff;
5027 req->l2_addr_mask[1] = 0xff;
5028 req->l2_addr_mask[2] = 0xff;
5029 req->l2_addr_mask[3] = 0xff;
5030 req->l2_addr_mask[4] = 0xff;
5031 req->l2_addr_mask[5] = 0xff;
5032
5033 resp = hwrm_req_hold(bp, req);
5034 rc = hwrm_req_send(bp, req);
c0c050c5
MC
5035 if (!rc)
5036 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5037 resp->l2_filter_id;
bbf33d1d 5038 hwrm_req_drop(bp, req);
c0c050c5
MC
5039 return rc;
5040}
5041
5042static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5043{
bbf33d1d 5044 struct hwrm_cfa_l2_filter_free_input *req;
c0c050c5 5045 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
bbf33d1d 5046 int rc;
c0c050c5
MC
5047
5048 /* Any associated ntuple filters will also be cleared by firmware. */
bbf33d1d
EP
5049 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5050 if (rc)
5051 return rc;
5052 hwrm_req_hold(bp, req);
c0c050c5
MC
5053 for (i = 0; i < num_of_vnics; i++) {
5054 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5055
5056 for (j = 0; j < vnic->uc_filter_count; j++) {
bbf33d1d 5057 req->l2_filter_id = vnic->fw_l2_filter_id[j];
c0c050c5 5058
bbf33d1d 5059 rc = hwrm_req_send(bp, req);
c0c050c5
MC
5060 }
5061 vnic->uc_filter_count = 0;
5062 }
bbf33d1d 5063 hwrm_req_drop(bp, req);
c0c050c5
MC
5064 return rc;
5065}
5066
5067static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5068{
5069 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 5070 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
bbf33d1d
EP
5071 struct hwrm_vnic_tpa_cfg_input *req;
5072 int rc;
c0c050c5 5073
3c4fe80b
MC
5074 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5075 return 0;
5076
bbf33d1d
EP
5077 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5078 if (rc)
5079 return rc;
c0c050c5
MC
5080
5081 if (tpa_flags) {
5082 u16 mss = bp->dev->mtu - 40;
5083 u32 nsegs, n, segs = 0, flags;
5084
5085 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5086 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5087 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5088 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5089 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5090 if (tpa_flags & BNXT_FLAG_GRO)
5091 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5092
bbf33d1d 5093 req->flags = cpu_to_le32(flags);
c0c050c5 5094
bbf33d1d 5095 req->enables =
c0c050c5 5096 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
5097 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5098 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
5099
5100 /* The number of segs is in log2 units, and the first packet is not
5101 * included as part of these units.
5102 */
2839f28b
MC
5103 if (mss <= BNXT_RX_PAGE_SIZE) {
5104 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
5105 nsegs = (MAX_SKB_FRAGS - 1) * n;
5106 } else {
2839f28b
MC
5107 n = mss / BNXT_RX_PAGE_SIZE;
5108 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
5109 n++;
5110 nsegs = (MAX_SKB_FRAGS - n) / n;
5111 }
5112
79632e9b
MC
5113 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5114 segs = MAX_TPA_SEGS_P5;
5115 max_aggs = bp->max_tpa;
5116 } else {
5117 segs = ilog2(nsegs);
5118 }
bbf33d1d
EP
5119 req->max_agg_segs = cpu_to_le16(segs);
5120 req->max_aggs = cpu_to_le16(max_aggs);
c193554e 5121
bbf33d1d 5122 req->min_agg_len = cpu_to_le32(512);
c0c050c5 5123 }
bbf33d1d 5124 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
c0c050c5 5125
bbf33d1d 5126 return hwrm_req_send(bp, req);
c0c050c5
MC
5127}
5128
2c61d211
MC
5129static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5130{
5131 struct bnxt_ring_grp_info *grp_info;
5132
5133 grp_info = &bp->grp_info[ring->grp_idx];
5134 return grp_info->cp_fw_ring_id;
5135}
5136
5137static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5138{
5139 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5140 struct bnxt_napi *bnapi = rxr->bnapi;
5141 struct bnxt_cp_ring_info *cpr;
5142
5143 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5144 return cpr->cp_ring_struct.fw_ring_id;
5145 } else {
5146 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5147 }
5148}
5149
5150static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5151{
5152 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5153 struct bnxt_napi *bnapi = txr->bnapi;
5154 struct bnxt_cp_ring_info *cpr;
5155
5156 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5157 return cpr->cp_ring_struct.fw_ring_id;
5158 } else {
5159 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5160 }
5161}
5162
1667cbf6
MC
5163static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5164{
5165 int entries;
5166
5167 if (bp->flags & BNXT_FLAG_CHIP_P5)
5168 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5169 else
5170 entries = HW_HASH_INDEX_SIZE;
5171
5172 bp->rss_indir_tbl_entries = entries;
5173 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5174 GFP_KERNEL);
5175 if (!bp->rss_indir_tbl)
5176 return -ENOMEM;
5177 return 0;
5178}
5179
5180static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5181{
5182 u16 max_rings, max_entries, pad, i;
5183
5184 if (!bp->rx_nr_rings)
5185 return;
5186
5187 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5188 max_rings = bp->rx_nr_rings - 1;
5189 else
5190 max_rings = bp->rx_nr_rings;
5191
5192 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5193
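	/* ethtool_rxfh_indir_default() spreads the entries round-robin across
	 * the rings, e.g. with 4 RX rings the table becomes 0, 1, 2, 3, 0, 1, ...
	 */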
5194 for (i = 0; i < max_entries; i++)
5195 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5196
5197 pad = bp->rss_indir_tbl_entries - max_entries;
5198 if (pad)
5199 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5200}
5201
bd3191b5
MC
5202static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5203{
5204 u16 i, tbl_size, max_ring = 0;
5205
5206 if (!bp->rss_indir_tbl)
5207 return 0;
5208
5209 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5210 for (i = 0; i < tbl_size; i++)
5211 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5212 return max_ring;
5213}
5214
f9f6a3fb
MC
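/* Number of RSS contexts needed for rx_rings: P5 chips use one context per
 * BNXT_RSS_TABLE_ENTRIES_P5 RX rings, Nitro A0 uses two, and all other
 * chips use one.
 */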
5215int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5216{
5217 if (bp->flags & BNXT_FLAG_CHIP_P5)
5218 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5219 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5220 return 2;
5221 return 1;
5222}
5223
41d2dd42 5224static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
f33a305d
MC
5225{
5226 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5227 u16 i, j;
5228
5229 /* Fill the RSS indirection table with ring group ids */
5230 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5231 if (!no_rss)
5232 j = bp->rss_indir_tbl[i];
5233 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5234 }
5235}
5236
41d2dd42
EP
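/* On P5 chips each RSS indirection slot holds a pair of firmware ring IDs:
 * the RX ring followed by its completion ring.
 */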
5237static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5238 struct bnxt_vnic_info *vnic)
f33a305d
MC
5239{
5240 __le16 *ring_tbl = vnic->rss_table;
5241 struct bnxt_rx_ring_info *rxr;
5242 u16 tbl_size, i;
5243
5244 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5245
5246 for (i = 0; i < tbl_size; i++) {
5247 u16 ring_id, j;
5248
5249 j = bp->rss_indir_tbl[i];
5250 rxr = &bp->rx_ring[j];
5251
5252 ring_id = rxr->rx_ring_struct.fw_ring_id;
5253 *ring_tbl++ = cpu_to_le16(ring_id);
5254 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5255 *ring_tbl++ = cpu_to_le16(ring_id);
5256 }
5257}
5258
41d2dd42
EP
5259static void
5260__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
5261 struct bnxt_vnic_info *vnic)
f33a305d
MC
5262{
5263 if (bp->flags & BNXT_FLAG_CHIP_P5)
41d2dd42 5264 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
f33a305d 5265 else
41d2dd42
EP
5266 bnxt_fill_hw_rss_tbl(bp, vnic);
5267
98a4322b
EP
5268 if (bp->rss_hash_delta) {
5269 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
5270 if (bp->rss_hash_cfg & bp->rss_hash_delta)
5271 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
5272 else
5273 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
5274 } else {
5275 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5276 }
41d2dd42
EP
5277 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5278 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5279 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
f33a305d
MC
5280}
5281
c0c050c5
MC
5282static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5283{
c0c050c5 5284 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5285 struct hwrm_vnic_rss_cfg_input *req;
5286 int rc;
c0c050c5 5287
7b3af4f7
MC
5288 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5289 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
5290 return 0;
5291
bbf33d1d
EP
5292 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5293 if (rc)
5294 return rc;
5295
41d2dd42
EP
5296 if (set_rss)
5297 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
bbf33d1d
EP
5298 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5299 return hwrm_req_send(bp, req);
c0c050c5
MC
5300}
5301
7b3af4f7
MC
5302static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5303{
5304 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d 5305 struct hwrm_vnic_rss_cfg_input *req;
f33a305d
MC
5306 dma_addr_t ring_tbl_map;
5307 u32 i, nr_ctxs;
bbf33d1d
EP
5308 int rc;
5309
5310 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5311 if (rc)
5312 return rc;
5313
5314 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5315 if (!set_rss)
5316 return hwrm_req_send(bp, req);
7b3af4f7 5317
41d2dd42 5318 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
f33a305d 5319 ring_tbl_map = vnic->rss_table_dma_addr;
f9f6a3fb 5320 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7 5321
bbf33d1d
EP
5322 hwrm_req_hold(bp, req);
5323 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5324 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5325 req->ring_table_pair_index = i;
5326 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5327 rc = hwrm_req_send(bp, req);
7b3af4f7 5328 if (rc)
bbf33d1d 5329 goto exit;
7b3af4f7 5330 }
bbf33d1d
EP
5331
5332exit:
5333 hwrm_req_drop(bp, req);
5334 return rc;
7b3af4f7
MC
5335}
5336
98a4322b
EP
5337static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
5338{
5339 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5340 struct hwrm_vnic_rss_qcfg_output *resp;
5341 struct hwrm_vnic_rss_qcfg_input *req;
5342
5343 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
5344 return;
5345
095d5dc0 5346 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
98a4322b
EP
5347 /* all contexts are configured to the same hash_type; context zero always exists */
5348 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5349 resp = hwrm_req_hold(bp, req);
5350 if (!hwrm_req_send(bp, req)) {
5351 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
5352 bp->rss_hash_delta = 0;
5353 }
5354 hwrm_req_drop(bp, req);
5355}
5356
c0c050c5
MC
5357static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5358{
5359 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5360 struct hwrm_vnic_plcmodes_cfg_input *req;
5361 int rc;
5362
5363 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5364 if (rc)
5365 return rc;
c0c050c5 5366
32861236
AG
5367 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
5368 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
5369
a056ebcc
MC
5370 if (BNXT_RX_PAGE_MODE(bp)) {
5371 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
5372 } else {
32861236
AG
5373 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5374 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5375 req->enables |=
5376 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
a056ebcc
MC
5377 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5378 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
32861236 5379 }
bbf33d1d
EP
5380 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5381 return hwrm_req_send(bp, req);
c0c050c5
MC
5382}
5383
94ce9caa
PS
5384static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5385 u16 ctx_idx)
c0c050c5 5386{
bbf33d1d 5387 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
c0c050c5 5388
bbf33d1d
EP
5389 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5390 return;
5391
5392 req->rss_cos_lb_ctx_id =
94ce9caa 5393 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5 5394
bbf33d1d 5395 hwrm_req_send(bp, req);
94ce9caa 5396 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
5397}
5398
5399static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5400{
94ce9caa 5401 int i, j;
c0c050c5
MC
5402
5403 for (i = 0; i < bp->nr_vnics; i++) {
5404 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5405
94ce9caa
PS
5406 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5407 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5408 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5409 }
c0c050c5
MC
5410 }
5411 bp->rsscos_nr_ctxs = 0;
5412}
5413
94ce9caa 5414static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5 5415{
bbf33d1d
EP
5416 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5417 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
c0c050c5 5418 int rc;
c0c050c5 5419
bbf33d1d
EP
5420 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5421 if (rc)
5422 return rc;
c0c050c5 5423
bbf33d1d
EP
5424 resp = hwrm_req_hold(bp, req);
5425 rc = hwrm_req_send(bp, req);
c0c050c5 5426 if (!rc)
94ce9caa 5427 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5 5428 le16_to_cpu(resp->rss_cos_lb_ctx_id);
bbf33d1d 5429 hwrm_req_drop(bp, req);
c0c050c5
MC
5430
5431 return rc;
5432}
5433
abe93ad2
MC
5434static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5435{
5436 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5437 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5438 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5439}
5440
a588e458 5441int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 5442{
c0c050c5 5443 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5444 struct hwrm_vnic_cfg_input *req;
5445 unsigned int ring = 0, grp_idx;
cf6645f8 5446 u16 def_vlan = 0;
bbf33d1d 5447 int rc;
c0c050c5 5448
bbf33d1d
EP
5449 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5450 if (rc)
5451 return rc;
dc52c6c7 5452
7b3af4f7
MC
5453 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5454 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5455
bbf33d1d 5456 req->default_rx_ring_id =
7b3af4f7 5457 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
bbf33d1d 5458 req->default_cmpl_ring_id =
7b3af4f7 5459 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
bbf33d1d 5460 req->enables =
7b3af4f7
MC
5461 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5462 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5463 goto vnic_mru;
5464 }
bbf33d1d 5465 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 5466 /* Only RSS is supported for now; COS & LB are TBD */
dc52c6c7 5467 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
bbf33d1d
EP
5468 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5469 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
dc52c6c7 5470 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74 5471 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
bbf33d1d 5472 req->rss_rule =
ae10ae74 5473 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
bbf33d1d 5474 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
ae10ae74 5475 VNIC_CFG_REQ_ENABLES_MRU);
bbf33d1d 5476 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7 5477 } else {
bbf33d1d 5478 req->rss_rule = cpu_to_le16(0xffff);
dc52c6c7 5479 }
94ce9caa 5480
dc52c6c7
PS
5481 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5482 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
bbf33d1d
EP
5483 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5484 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
94ce9caa 5485 } else {
bbf33d1d 5486 req->cos_rule = cpu_to_le16(0xffff);
94ce9caa
PS
5487 }
5488
c0c050c5 5489 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 5490 ring = 0;
c0c050c5 5491 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 5492 ring = vnic_id - 1;
76595193
PS
5493 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5494 ring = bp->rx_nr_rings - 1;
c0c050c5 5495
b81a90d3 5496 grp_idx = bp->rx_ring[ring].bnapi->index;
bbf33d1d
EP
5497 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5498 req->lb_rule = cpu_to_le16(0xffff);
7b3af4f7 5499vnic_mru:
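	/* MRU covers the MTU plus the Ethernet header and one VLAN tag */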
bbf33d1d 5500 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
c0c050c5 5501
bbf33d1d 5502 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
5503#ifdef CONFIG_BNXT_SRIOV
5504 if (BNXT_VF(bp))
5505 def_vlan = bp->vf.vlan;
5506#endif
5507 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
bbf33d1d 5508 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
dafcdf5e 5509 if (!vnic_id && bnxt_ulp_registered(bp->edev))
bbf33d1d 5510 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5 5511
bbf33d1d 5512 return hwrm_req_send(bp, req);
c0c050c5
MC
5513}
5514
3d061591 5515static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
c0c050c5 5516{
c0c050c5 5517 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
bbf33d1d 5518 struct hwrm_vnic_free_input *req;
c0c050c5 5519
bbf33d1d
EP
5520 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5521 return;
5522
5523 req->vnic_id =
c0c050c5
MC
5524 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5525
bbf33d1d 5526 hwrm_req_send(bp, req);
c0c050c5
MC
5527 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5528 }
c0c050c5
MC
5529}
5530
5531static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5532{
5533 u16 i;
5534
5535 for (i = 0; i < bp->nr_vnics; i++)
5536 bnxt_hwrm_vnic_free_one(bp, i);
5537}
5538
b81a90d3
MC
5539static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5540 unsigned int start_rx_ring_idx,
5541 unsigned int nr_rings)
c0c050c5 5542{
b81a90d3 5543 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
44c6f72a 5544 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5545 struct hwrm_vnic_alloc_output *resp;
5546 struct hwrm_vnic_alloc_input *req;
5547 int rc;
5548
5549 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5550 if (rc)
5551 return rc;
44c6f72a
MC
5552
5553 if (bp->flags & BNXT_FLAG_CHIP_P5)
5554 goto vnic_no_ring_grps;
c0c050c5
MC
5555
5556 /* map ring groups to this vnic */
b81a90d3
MC
5557 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5558 grp_idx = bp->rx_ring[i].bnapi->index;
5559 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 5560 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 5561 j, nr_rings);
c0c050c5
MC
5562 break;
5563 }
44c6f72a 5564 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
5565 }
5566
44c6f72a
MC
5567vnic_no_ring_grps:
5568 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5569 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5 5570 if (vnic_id == 0)
bbf33d1d 5571 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
c0c050c5 5572
bbf33d1d
EP
5573 resp = hwrm_req_hold(bp, req);
5574 rc = hwrm_req_send(bp, req);
c0c050c5 5575 if (!rc)
44c6f72a 5576 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
bbf33d1d 5577 hwrm_req_drop(bp, req);
c0c050c5
MC
5578 return rc;
5579}
5580
8fdefd63
MC
5581static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5582{
bbf33d1d
EP
5583 struct hwrm_vnic_qcaps_output *resp;
5584 struct hwrm_vnic_qcaps_input *req;
8fdefd63
MC
5585 int rc;
5586
fbbdbc64 5587 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
ba642ab7 5588 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
8fdefd63
MC
5589 if (bp->hwrm_spec_code < 0x10600)
5590 return 0;
5591
bbf33d1d
EP
5592 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5593 if (rc)
5594 return rc;
5595
5596 resp = hwrm_req_hold(bp, req);
5597 rc = hwrm_req_send(bp, req);
8fdefd63 5598 if (!rc) {
abe93ad2
MC
5599 u32 flags = le32_to_cpu(resp->flags);
5600
41e8d798
MC
5601 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5602 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 5603 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
5604 if (flags &
5605 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5606 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
1da63ddd
EP
5607
5608 /* Older P5 fw before EXT_HW_STATS support did not set
5609 * VLAN_STRIP_CAP properly.
5610 */
5611 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
9d6b648c 5612 (BNXT_CHIP_P5_THOR(bp) &&
1da63ddd
EP
5613 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5614 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
98a4322b
EP
5615 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
5616 bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
79632e9b 5617 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
9d6b648c
MC
5618 if (bp->max_tpa_v2) {
5619 if (BNXT_CHIP_P5_THOR(bp))
5620 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5621 else
5622 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5623 }
8fdefd63 5624 }
bbf33d1d 5625 hwrm_req_drop(bp, req);
8fdefd63
MC
5626 return rc;
5627}
5628
c0c050c5
MC
5629static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5630{
bbf33d1d
EP
5631 struct hwrm_ring_grp_alloc_output *resp;
5632 struct hwrm_ring_grp_alloc_input *req;
5633 int rc;
c0c050c5 5634 u16 i;
c0c050c5 5635
44c6f72a
MC
5636 if (bp->flags & BNXT_FLAG_CHIP_P5)
5637 return 0;
5638
bbf33d1d
EP
5639 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5640 if (rc)
5641 return rc;
5642
5643 resp = hwrm_req_hold(bp, req);
c0c050c5 5644 for (i = 0; i < bp->rx_nr_rings; i++) {
b81a90d3 5645 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5 5646
bbf33d1d
EP
5647 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5648 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5649 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5650 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5 5651
bbf33d1d 5652 rc = hwrm_req_send(bp, req);
c0c050c5 5653
c0c050c5
MC
5654 if (rc)
5655 break;
5656
b81a90d3
MC
5657 bp->grp_info[grp_idx].fw_grp_id =
5658 le32_to_cpu(resp->ring_group_id);
c0c050c5 5659 }
bbf33d1d 5660 hwrm_req_drop(bp, req);
c0c050c5
MC
5661 return rc;
5662}
5663
3d061591 5664static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
c0c050c5 5665{
bbf33d1d 5666 struct hwrm_ring_grp_free_input *req;
c0c050c5 5667 u16 i;
c0c050c5 5668
44c6f72a 5669 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
3d061591 5670 return;
c0c050c5 5671
bbf33d1d
EP
5672 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5673 return;
c0c050c5 5674
bbf33d1d 5675 hwrm_req_hold(bp, req);
c0c050c5
MC
5676 for (i = 0; i < bp->cp_nr_rings; i++) {
5677 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5678 continue;
bbf33d1d 5679 req->ring_group_id =
c0c050c5
MC
5680 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5681
bbf33d1d 5682 hwrm_req_send(bp, req);
c0c050c5
MC
5683 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5684 }
bbf33d1d 5685 hwrm_req_drop(bp, req);
c0c050c5
MC
5686}
5687
5688static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5689 struct bnxt_ring_struct *ring,
9899bb59 5690 u32 ring_type, u32 map_index)
c0c050c5 5691{
bbf33d1d
EP
5692 struct hwrm_ring_alloc_output *resp;
5693 struct hwrm_ring_alloc_input *req;
6fe19886 5694 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5695 struct bnxt_ring_grp_info *grp_info;
bbf33d1d 5696 int rc, err = 0;
c0c050c5
MC
5697 u16 ring_id;
5698
bbf33d1d
EP
5699 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5700 if (rc)
5701 goto exit;
c0c050c5 5702
bbf33d1d 5703 req->enables = 0;
6fe19886 5704 if (rmem->nr_pages > 1) {
bbf33d1d 5705 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5 5706 /* Page size is in log2 units */
bbf33d1d
EP
5707 req->page_size = BNXT_PAGE_SHIFT;
5708 req->page_tbl_depth = 1;
c0c050c5 5709 } else {
bbf33d1d 5710 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5 5711 }
bbf33d1d 5712 req->fbo = 0;
c0c050c5 5713 /* Association of ring index with doorbell index and MSIX number */
bbf33d1d 5714 req->logical_id = cpu_to_le16(map_index);
c0c050c5
MC
5715
5716 switch (ring_type) {
2c61d211
MC
5717 case HWRM_RING_ALLOC_TX: {
5718 struct bnxt_tx_ring_info *txr;
5719
5720 txr = container_of(ring, struct bnxt_tx_ring_info,
5721 tx_ring_struct);
bbf33d1d 5722 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
c0c050c5 5723 /* Association of transmit ring with completion ring */
9899bb59 5724 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5725 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5726 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5727 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5728 req->queue_id = cpu_to_le16(ring->queue_id);
c0c050c5 5729 break;
2c61d211 5730 }
c0c050c5 5731 case HWRM_RING_ALLOC_RX:
bbf33d1d
EP
5732 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5733 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5734 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5735 u16 flags = 0;
5736
5737 /* Association of rx ring with stats context */
5738 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5739 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5740 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5741 req->enables |= cpu_to_le32(
23aefdd7
MC
5742 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5743 if (NET_IP_ALIGN == 2)
5744 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
bbf33d1d 5745 req->flags = cpu_to_le16(flags);
23aefdd7 5746 }
c0c050c5
MC
5747 break;
5748 case HWRM_RING_ALLOC_AGG:
23aefdd7 5749 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bbf33d1d 5750 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
23aefdd7
MC
5751 /* Association of agg ring with rx ring */
5752 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5753 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5754 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5755 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5756 req->enables |= cpu_to_le32(
23aefdd7
MC
5757 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5758 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5759 } else {
bbf33d1d 5760 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
23aefdd7 5761 }
bbf33d1d 5762 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
c0c050c5
MC
5763 break;
5764 case HWRM_RING_ALLOC_CMPL:
bbf33d1d
EP
5765 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5766 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5767 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5768 /* Association of cp ring with nq */
5769 grp_info = &bp->grp_info[map_index];
bbf33d1d
EP
5770 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5771 req->cq_handle = cpu_to_le64(ring->handle);
5772 req->enables |= cpu_to_le32(
23aefdd7
MC
5773 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5774 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
bbf33d1d 5775 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
23aefdd7
MC
5776 }
5777 break;
5778 case HWRM_RING_ALLOC_NQ:
bbf33d1d
EP
5779 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5780 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5 5781 if (bp->flags & BNXT_FLAG_USING_MSIX)
bbf33d1d 5782 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
c0c050c5
MC
5783 break;
5784 default:
5785 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5786 ring_type);
5787 return -1;
5788 }
5789
bbf33d1d
EP
5790 resp = hwrm_req_hold(bp, req);
5791 rc = hwrm_req_send(bp, req);
c0c050c5
MC
5792 err = le16_to_cpu(resp->error_code);
5793 ring_id = le16_to_cpu(resp->ring_id);
bbf33d1d 5794 hwrm_req_drop(bp, req);
c0c050c5 5795
bbf33d1d 5796exit:
c0c050c5 5797 if (rc || err) {
2727c888
MC
5798 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5799 ring_type, rc, err);
5800 return -EIO;
c0c050c5
MC
5801 }
5802 ring->fw_ring_id = ring_id;
5803 return rc;
5804}
5805
486b5c22
MC
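/* Point the firmware's asynchronous event notifications at the given
 * completion ring.  The PF configures this through HWRM_FUNC_CFG while a
 * VF uses HWRM_FUNC_VF_CFG.
 */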
5806static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5807{
5808 int rc;
5809
5810 if (BNXT_PF(bp)) {
bbf33d1d 5811 struct hwrm_func_cfg_input *req;
486b5c22 5812
bbf33d1d
EP
5813 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5814 if (rc)
5815 return rc;
5816
5817 req->fid = cpu_to_le16(0xffff);
5818 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5819 req->async_event_cr = cpu_to_le16(idx);
5820 return hwrm_req_send(bp, req);
486b5c22 5821 } else {
bbf33d1d
EP
5822 struct hwrm_func_vf_cfg_input *req;
5823
5824 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5825 if (rc)
5826 return rc;
486b5c22 5827
bbf33d1d 5828 req->enables =
486b5c22 5829 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
bbf33d1d
EP
5830 req->async_event_cr = cpu_to_le16(idx);
5831 return hwrm_req_send(bp, req);
486b5c22 5832 }
486b5c22
MC
5833}
5834
697197e5
MC
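/* Set up the doorbell address and key for a newly allocated ring.  P5 chips
 * use 64-bit doorbells at a fixed PF/VF offset keyed by ring type and
 * firmware ring ID; older chips use a 32-bit doorbell located at
 * map_idx * 0x80 in the doorbell BAR (bp->bar1).
 */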
5835static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5836 u32 map_idx, u32 xid)
5837{
5838 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5839 if (BNXT_PF(bp))
ebdf73dc 5840 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
697197e5 5841 else
ebdf73dc 5842 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
697197e5
MC
5843 switch (ring_type) {
5844 case HWRM_RING_ALLOC_TX:
5845 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5846 break;
5847 case HWRM_RING_ALLOC_RX:
5848 case HWRM_RING_ALLOC_AGG:
5849 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5850 break;
5851 case HWRM_RING_ALLOC_CMPL:
5852 db->db_key64 = DBR_PATH_L2;
5853 break;
5854 case HWRM_RING_ALLOC_NQ:
5855 db->db_key64 = DBR_PATH_L2;
5856 break;
5857 }
5858 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5859 } else {
5860 db->doorbell = bp->bar1 + map_idx * 0x80;
5861 switch (ring_type) {
5862 case HWRM_RING_ALLOC_TX:
5863 db->db_key32 = DB_KEY_TX;
5864 break;
5865 case HWRM_RING_ALLOC_RX:
5866 case HWRM_RING_ALLOC_AGG:
5867 db->db_key32 = DB_KEY_RX;
5868 break;
5869 case HWRM_RING_ALLOC_CMPL:
5870 db->db_key32 = DB_KEY_CP;
5871 break;
5872 }
5873 }
5874}
5875
c0c050c5
MC
5876static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5877{
e8f267b0 5878 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
c0c050c5 5879 int i, rc = 0;
697197e5 5880 u32 type;
c0c050c5 5881
23aefdd7
MC
5882 if (bp->flags & BNXT_FLAG_CHIP_P5)
5883 type = HWRM_RING_ALLOC_NQ;
5884 else
5885 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5886 for (i = 0; i < bp->cp_nr_rings; i++) {
5887 struct bnxt_napi *bnapi = bp->bnapi[i];
5888 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5889 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5890 u32 map_idx = ring->map_idx;
5e66e35a 5891 unsigned int vector;
c0c050c5 5892
5e66e35a
MC
5893 vector = bp->irq_tbl[map_idx].vector;
5894 disable_irq_nosync(vector);
697197e5 5895 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5896 if (rc) {
5897 enable_irq(vector);
edd0c2cc 5898 goto err_out;
5e66e35a 5899 }
697197e5
MC
5900 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5901 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5902 enable_irq(vector);
edd0c2cc 5903 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5904
5905 if (!i) {
5906 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5907 if (rc)
5908 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5909 }
c0c050c5
MC
5910 }
5911
697197e5 5912 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5913 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5914 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5915 struct bnxt_ring_struct *ring;
5916 u32 map_idx;
c0c050c5 5917
3e08b184
MC
5918 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5919 struct bnxt_napi *bnapi = txr->bnapi;
5920 struct bnxt_cp_ring_info *cpr, *cpr2;
5921 u32 type2 = HWRM_RING_ALLOC_CMPL;
5922
5923 cpr = &bnapi->cp_ring;
5924 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5925 ring = &cpr2->cp_ring_struct;
5926 ring->handle = BNXT_TX_HDL;
5927 map_idx = bnapi->index;
5928 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5929 if (rc)
5930 goto err_out;
5931 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5932 ring->fw_ring_id);
5933 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5934 }
5935 ring = &txr->tx_ring_struct;
5936 map_idx = i;
697197e5 5937 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5938 if (rc)
5939 goto err_out;
697197e5 5940 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5941 }
5942
697197e5 5943 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5944 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5945 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5946 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5947 struct bnxt_napi *bnapi = rxr->bnapi;
5948 u32 map_idx = bnapi->index;
c0c050c5 5949
697197e5 5950 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5951 if (rc)
5952 goto err_out;
697197e5 5953 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
e8f267b0
MC
5954 /* If we have agg rings, post agg buffers first. */
5955 if (!agg_rings)
5956 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5957 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5958 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5959 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5960 u32 type2 = HWRM_RING_ALLOC_CMPL;
5961 struct bnxt_cp_ring_info *cpr2;
5962
5963 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5964 ring = &cpr2->cp_ring_struct;
5965 ring->handle = BNXT_RX_HDL;
5966 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5967 if (rc)
5968 goto err_out;
5969 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5970 ring->fw_ring_id);
5971 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5972 }
c0c050c5
MC
5973 }
5974
e8f267b0 5975 if (agg_rings) {
697197e5 5976 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5977 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5978 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5979 struct bnxt_ring_struct *ring =
5980 &rxr->rx_agg_ring_struct;
9899bb59 5981 u32 grp_idx = ring->grp_idx;
b81a90d3 5982 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5983
697197e5 5984 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5985 if (rc)
5986 goto err_out;
5987
697197e5
MC
5988 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5989 ring->fw_ring_id);
5990 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 5991 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5992 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5993 }
5994 }
5995err_out:
5996 return rc;
5997}
5998
5999static int hwrm_ring_free_send_msg(struct bnxt *bp,
6000 struct bnxt_ring_struct *ring,
6001 u32 ring_type, int cmpl_ring_id)
6002{
bbf33d1d
EP
6003 struct hwrm_ring_free_output *resp;
6004 struct hwrm_ring_free_input *req;
6005 u16 error_code = 0;
c0c050c5 6006 int rc;
c0c050c5 6007
b340dc68 6008 if (BNXT_NO_FW_ACCESS(bp))
b4fff207
MC
6009 return 0;
6010
bbf33d1d
EP
6011 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6012 if (rc)
6013 goto exit;
c0c050c5 6014
bbf33d1d
EP
6015 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6016 req->ring_type = ring_type;
6017 req->ring_id = cpu_to_le16(ring->fw_ring_id);
c0c050c5 6018
bbf33d1d
EP
6019 resp = hwrm_req_hold(bp, req);
6020 rc = hwrm_req_send(bp, req);
6021 error_code = le16_to_cpu(resp->error_code);
6022 hwrm_req_drop(bp, req);
6023exit:
c0c050c5 6024 if (rc || error_code) {
2727c888
MC
6025 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6026 ring_type, rc, error_code);
6027 return -EIO;
c0c050c5
MC
6028 }
6029 return 0;
6030}
6031
edd0c2cc 6032static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 6033{
23aefdd7 6034 u32 type;
edd0c2cc 6035 int i;
c0c050c5
MC
6036
6037 if (!bp->bnapi)
edd0c2cc 6038 return;
c0c050c5 6039
edd0c2cc 6040 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 6041 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 6042 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
6043
6044 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
6045 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6046
edd0c2cc
MC
6047 hwrm_ring_free_send_msg(bp, ring,
6048 RING_FREE_REQ_RING_TYPE_TX,
6049 close_path ? cmpl_ring_id :
6050 INVALID_HW_RING_ID);
6051 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
6052 }
6053 }
6054
edd0c2cc 6055 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 6056 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 6057 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 6058 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
6059
6060 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
6061 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6062
edd0c2cc
MC
6063 hwrm_ring_free_send_msg(bp, ring,
6064 RING_FREE_REQ_RING_TYPE_RX,
6065 close_path ? cmpl_ring_id :
6066 INVALID_HW_RING_ID);
6067 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
6068 bp->grp_info[grp_idx].rx_fw_ring_id =
6069 INVALID_HW_RING_ID;
c0c050c5
MC
6070 }
6071 }
6072
23aefdd7
MC
6073 if (bp->flags & BNXT_FLAG_CHIP_P5)
6074 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6075 else
6076 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 6077 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 6078 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 6079 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 6080 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
6081
6082 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
6083 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6084
23aefdd7 6085 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
6086 close_path ? cmpl_ring_id :
6087 INVALID_HW_RING_ID);
6088 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
6089 bp->grp_info[grp_idx].agg_fw_ring_id =
6090 INVALID_HW_RING_ID;
c0c050c5
MC
6091 }
6092 }
6093
9d8bc097
MC
6094 /* The completion rings are about to be freed. After that the
6095 * IRQ doorbells will not work anymore, so we need to disable
6096 * the IRQs here.
6097 */
6098 bnxt_disable_int_sync(bp);
6099
23aefdd7
MC
6100 if (bp->flags & BNXT_FLAG_CHIP_P5)
6101 type = RING_FREE_REQ_RING_TYPE_NQ;
6102 else
6103 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
6104 for (i = 0; i < bp->cp_nr_rings; i++) {
6105 struct bnxt_napi *bnapi = bp->bnapi[i];
6106 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
6107 struct bnxt_ring_struct *ring;
6108 int j;
edd0c2cc 6109
3e08b184
MC
6110 for (j = 0; j < 2; j++) {
6111 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6112
6113 if (cpr2) {
6114 ring = &cpr2->cp_ring_struct;
6115 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6116 continue;
6117 hwrm_ring_free_send_msg(bp, ring,
6118 RING_FREE_REQ_RING_TYPE_L2_CMPL,
6119 INVALID_HW_RING_ID);
6120 ring->fw_ring_id = INVALID_HW_RING_ID;
6121 }
6122 }
6123 ring = &cpr->cp_ring_struct;
edd0c2cc 6124 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 6125 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
6126 INVALID_HW_RING_ID);
6127 ring->fw_ring_id = INVALID_HW_RING_ID;
6128 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
6129 }
6130 }
c0c050c5
MC
6131}
6132
41e8d798
MC
6133static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6134 bool shared);
6135
674f50a5
MC
6136static int bnxt_hwrm_get_rings(struct bnxt *bp)
6137{
674f50a5 6138 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
bbf33d1d
EP
6139 struct hwrm_func_qcfg_output *resp;
6140 struct hwrm_func_qcfg_input *req;
674f50a5
MC
6141 int rc;
6142
6143 if (bp->hwrm_spec_code < 0x10601)
6144 return 0;
6145
bbf33d1d
EP
6146 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6147 if (rc)
6148 return rc;
6149
6150 req->fid = cpu_to_le16(0xffff);
6151 resp = hwrm_req_hold(bp, req);
6152 rc = hwrm_req_send(bp, req);
674f50a5 6153 if (rc) {
bbf33d1d 6154 hwrm_req_drop(bp, req);
d4f1420d 6155 return rc;
674f50a5
MC
6156 }
6157
6158 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 6159 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
6160 u16 cp, stats;
6161
6162 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6163 hw_resc->resv_hw_ring_grps =
6164 le32_to_cpu(resp->alloc_hw_ring_grps);
6165 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6166 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6167 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 6168 hw_resc->resv_irqs = cp;
41e8d798
MC
6169 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6170 int rx = hw_resc->resv_rx_rings;
6171 int tx = hw_resc->resv_tx_rings;
6172
6173 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6174 rx >>= 1;
6175 if (cp < (rx + tx)) {
6176 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6177 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6178 rx <<= 1;
6179 hw_resc->resv_rx_rings = rx;
6180 hw_resc->resv_tx_rings = tx;
6181 }
75720e63 6182 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
6183 hw_resc->resv_hw_ring_grps = rx;
6184 }
674f50a5 6185 hw_resc->resv_cp_rings = cp;
780baad4 6186 hw_resc->resv_stat_ctxs = stats;
674f50a5 6187 }
bbf33d1d 6188 hwrm_req_drop(bp, req);
674f50a5
MC
6189 return 0;
6190}
6191
391be5c2
MC
6192int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6193{
bbf33d1d
EP
6194 struct hwrm_func_qcfg_output *resp;
6195 struct hwrm_func_qcfg_input *req;
391be5c2
MC
6196 int rc;
6197
6198 if (bp->hwrm_spec_code < 0x10601)
6199 return 0;
6200
bbf33d1d
EP
6201 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6202 if (rc)
6203 return rc;
6204
6205 req->fid = cpu_to_le16(fid);
6206 resp = hwrm_req_hold(bp, req);
6207 rc = hwrm_req_send(bp, req);
391be5c2
MC
6208 if (!rc)
6209 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6210
bbf33d1d 6211 hwrm_req_drop(bp, req);
391be5c2
MC
6212 return rc;
6213}
6214
41e8d798
MC
6215static bool bnxt_rfs_supported(struct bnxt *bp);
6216
bbf33d1d
EP
6217static struct hwrm_func_cfg_input *
6218__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6219 int ring_grps, int cp_rings, int stats, int vnics)
391be5c2 6220{
bbf33d1d 6221 struct hwrm_func_cfg_input *req;
674f50a5 6222 u32 enables = 0;
391be5c2 6223
bbf33d1d
EP
6224 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6225 return NULL;
6226
4ed50ef4 6227 req->fid = cpu_to_le16(0xffff);
674f50a5 6228 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 6229 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 6230 if (BNXT_NEW_RM(bp)) {
674f50a5 6231 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 6232 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6233 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6234 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6235 enables |= tx_rings + ring_grps ?
3f93cd3f 6236 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6237 enables |= rx_rings ?
6238 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6239 } else {
6240 enables |= cp_rings ?
3f93cd3f 6241 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6242 enables |= ring_grps ?
6243 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6244 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6245 }
dbe80d44 6246 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 6247
4ed50ef4 6248 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6249 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6250 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6251 req->num_msix = cpu_to_le16(cp_rings);
6252 req->num_rsscos_ctxs =
6253 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6254 } else {
6255 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6256 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6257 req->num_rsscos_ctxs = cpu_to_le16(1);
6258 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6259 bnxt_rfs_supported(bp))
6260 req->num_rsscos_ctxs =
6261 cpu_to_le16(ring_grps + 1);
6262 }
780baad4 6263 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 6264 req->num_vnics = cpu_to_le16(vnics);
674f50a5 6265 }
4ed50ef4 6266 req->enables = cpu_to_le32(enables);
bbf33d1d 6267 return req;
4ed50ef4
MC
6268}
6269
bbf33d1d
EP
6270static struct hwrm_func_vf_cfg_input *
6271__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6272 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4 6273{
bbf33d1d 6274 struct hwrm_func_vf_cfg_input *req;
4ed50ef4
MC
6275 u32 enables = 0;
6276
bbf33d1d
EP
6277 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6278 return NULL;
6279
4ed50ef4 6280 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
6281 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6282 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 6283 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6284 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6285 enables |= tx_rings + ring_grps ?
3f93cd3f 6286 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6287 } else {
6288 enables |= cp_rings ?
3f93cd3f 6289 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6290 enables |= ring_grps ?
6291 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6292 }
4ed50ef4 6293 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 6294 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 6295
41e8d798 6296 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
6297 req->num_tx_rings = cpu_to_le16(tx_rings);
6298 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6299 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6300 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6301 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6302 } else {
6303 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6304 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6305 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6306 }
780baad4 6307 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
6308 req->num_vnics = cpu_to_le16(vnics);
6309
6310 req->enables = cpu_to_le32(enables);
bbf33d1d 6311 return req;
4ed50ef4
MC
6312}
6313
6314static int
6315bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6316 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4 6317{
bbf33d1d 6318 struct hwrm_func_cfg_input *req;
4ed50ef4
MC
6319 int rc;
6320
bbf33d1d
EP
6321 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6322 cp_rings, stats, vnics);
6323 if (!req)
6324 return -ENOMEM;
6325
6326 if (!req->enables) {
6327 hwrm_req_drop(bp, req);
391be5c2 6328 return 0;
bbf33d1d 6329 }
391be5c2 6330
bbf33d1d 6331 rc = hwrm_req_send(bp, req);
674f50a5 6332 if (rc)
d4f1420d 6333 return rc;
674f50a5
MC
6334
6335 if (bp->hwrm_spec_code < 0x10601)
6336 bp->hw_resc.resv_tx_rings = tx_rings;
6337
9f90445c 6338 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6339}
6340
6341static int
6342bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6343 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5 6344{
bbf33d1d 6345 struct hwrm_func_vf_cfg_input *req;
674f50a5
MC
6346 int rc;
6347
f1ca94de 6348 if (!BNXT_NEW_RM(bp)) {
674f50a5 6349 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 6350 return 0;
674f50a5 6351 }
391be5c2 6352
bbf33d1d
EP
6353 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6354 cp_rings, stats, vnics);
6355 if (!req)
6356 return -ENOMEM;
6357
6358 rc = hwrm_req_send(bp, req);
674f50a5 6359 if (rc)
d4f1420d 6360 return rc;
674f50a5 6361
9f90445c 6362 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6363}
6364
6365static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 6366 int cp, int stat, int vnic)
674f50a5
MC
6367{
6368 if (BNXT_PF(bp))
780baad4
VV
6369 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6370 vnic);
674f50a5 6371 else
780baad4
VV
6372 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6373 vnic);
674f50a5
MC
6374}
6375
b16b6891 6376int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
6377{
6378 int cp = bp->cp_nr_rings;
6379 int ulp_msix, ulp_base;
6380
6381 ulp_msix = bnxt_get_ulp_msix_num(bp);
6382 if (ulp_msix) {
6383 ulp_base = bnxt_get_ulp_msix_base(bp);
6384 cp += ulp_msix;
6385 if ((ulp_base + ulp_msix) > cp)
6386 cp = ulp_base + ulp_msix;
6387 }
6388 return cp;
6389}
6390
c0b8cda0
MC
6391static int bnxt_cp_rings_in_use(struct bnxt *bp)
6392{
6393 int cp;
6394
6395 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6396 return bnxt_nq_rings_in_use(bp);
6397
6398 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6399 return cp;
6400}
6401
780baad4
VV
6402static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6403{
d77b1ad8
MC
6404 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6405 int cp = bp->cp_nr_rings;
6406
6407 if (!ulp_stat)
6408 return cp;
6409
6410 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6411 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6412
6413 return cp + ulp_stat;
780baad4
VV
6414}
6415
b43b9f53
MC
6416/* Check if a default RSS map needs to be set up. This function is only
6417 * used on older firmware that does not require reserving RX rings.
6418 */
6419static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6420{
6421 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6422
6423 /* The RSS map is valid for RX rings set to resv_rx_rings */
6424 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6425 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6426 if (!netif_is_rxfh_configured(bp->dev))
6427 bnxt_set_dflt_rss_indir_tbl(bp);
6428 }
6429}
6430
4e41dc5d
MC
6431static bool bnxt_need_reserve_rings(struct bnxt *bp)
6432{
6433 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 6434 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 6435 int nq = bnxt_nq_rings_in_use(bp);
780baad4 6436 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
6437 int vnic = 1, grp = rx;
6438
b43b9f53
MC
6439 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6440 bp->hwrm_spec_code >= 0x10601)
4e41dc5d
MC
6441 return true;
6442
b43b9f53
MC
6443 /* Old firmware does not need RX ring reservations but we still
6444 * need to set up a default RSS map when needed. With new firmware
6445 * we go through RX ring reservations first and then set up the
6446 * RSS map for the successfully reserved RX rings when needed.
6447 */
6448 if (!BNXT_NEW_RM(bp)) {
6449 bnxt_check_rss_tbl_no_rmgr(bp);
6450 return false;
6451 }
41e8d798 6452 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
6453 vnic = rx + 1;
6454 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6455 rx <<= 1;
780baad4 6456 stat = bnxt_get_func_stat_ctxs(bp);
b43b9f53
MC
6457 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6458 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6459 (hw_resc->resv_hw_ring_grps != grp &&
6460 !(bp->flags & BNXT_FLAG_CHIP_P5)))
4e41dc5d 6461 return true;
01989c6b
MC
6462 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6463 hw_resc->resv_irqs != nq)
6464 return true;
4e41dc5d
MC
6465 return false;
6466}
6467
674f50a5
MC
6468static int __bnxt_reserve_rings(struct bnxt *bp)
6469{
6470 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 6471 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
6472 int tx = bp->tx_nr_rings;
6473 int rx = bp->rx_nr_rings;
674f50a5 6474 int grp, rx_rings, rc;
780baad4 6475 int vnic = 1, stat;
674f50a5 6476 bool sh = false;
674f50a5 6477
4e41dc5d 6478 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
6479 return 0;
6480
6481 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6482 sh = true;
41e8d798 6483 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
6484 vnic = rx + 1;
6485 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6486 rx <<= 1;
674f50a5 6487 grp = bp->rx_nr_rings;
780baad4 6488 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 6489
780baad4 6490 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
6491 if (rc)
6492 return rc;
6493
674f50a5 6494 tx = hw_resc->resv_tx_rings;
f1ca94de 6495 if (BNXT_NEW_RM(bp)) {
674f50a5 6496 rx = hw_resc->resv_rx_rings;
c0b8cda0 6497 cp = hw_resc->resv_irqs;
674f50a5
MC
6498 grp = hw_resc->resv_hw_ring_grps;
6499 vnic = hw_resc->resv_vnics;
780baad4 6500 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
6501 }
6502
6503 rx_rings = rx;
6504 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6505 if (rx >= 2) {
6506 rx_rings = rx >> 1;
6507 } else {
6508 if (netif_running(bp->dev))
6509 return -ENOMEM;
6510
6511 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6512 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6513 bp->dev->hw_features &= ~NETIF_F_LRO;
6514 bp->dev->features &= ~NETIF_F_LRO;
6515 bnxt_set_ring_params(bp);
6516 }
6517 }
6518 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
6519 cp = min_t(int, cp, bp->cp_nr_rings);
6520 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6521 stat -= bnxt_get_ulp_stat_ctxs(bp);
6522 cp = min_t(int, cp, stat);
674f50a5
MC
6523 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6524 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6525 rx = rx_rings << 1;
6526 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6527 bp->tx_nr_rings = tx;
bd3191b5
MC
6528
6529 /* If we cannot reserve all the RX rings, reset the RSS map only
6530 * if absolutely necessary.
6531 */
6532 if (rx_rings != bp->rx_nr_rings) {
6533 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6534 rx_rings, bp->rx_nr_rings);
4b70dce2 6535 if (netif_is_rxfh_configured(bp->dev) &&
bd3191b5
MC
6536 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6537 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6538 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6539 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6540 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6541 }
6542 }
674f50a5
MC
6543 bp->rx_nr_rings = rx_rings;
6544 bp->cp_nr_rings = cp;
6545
780baad4 6546 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
6547 return -ENOMEM;
6548
5fa65524
EP
6549 if (!netif_is_rxfh_configured(bp->dev))
6550 bnxt_set_dflt_rss_indir_tbl(bp);
6551
391be5c2
MC
6552 return rc;
6553}
6554
8f23d638 6555static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6556 int ring_grps, int cp_rings, int stats,
6557 int vnics)
98fdbe73 6558{
bbf33d1d 6559 struct hwrm_func_vf_cfg_input *req;
6fc2ffdf 6560 u32 flags;
98fdbe73 6561
f1ca94de 6562 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
6563 return 0;
6564
bbf33d1d
EP
6565 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6566 cp_rings, stats, vnics);
8f23d638
MC
6567 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6568 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6569 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 6570 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
6571 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6572 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6573 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6574 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638 6575
bbf33d1d
EP
6576 req->flags = cpu_to_le32(flags);
6577 return hwrm_req_send_silent(bp, req);
8f23d638
MC
6578}
6579
6580static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6581 int ring_grps, int cp_rings, int stats,
6582 int vnics)
8f23d638 6583{
bbf33d1d 6584 struct hwrm_func_cfg_input *req;
6fc2ffdf 6585 u32 flags;
98fdbe73 6586
bbf33d1d
EP
6587 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6588 cp_rings, stats, vnics);
8f23d638 6589 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 6590 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
6591 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6592 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
6593 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6594 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 6595 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
6596 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6597 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
6598 else
6599 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6600 }
6fc2ffdf 6601
bbf33d1d
EP
6602 req->flags = cpu_to_le32(flags);
6603 return hwrm_req_send_silent(bp, req);
98fdbe73
MC
6604}
6605
8f23d638 6606static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6607 int ring_grps, int cp_rings, int stats,
6608 int vnics)
8f23d638
MC
6609{
6610 if (bp->hwrm_spec_code < 0x10801)
6611 return 0;
6612
6613 if (BNXT_PF(bp))
6614 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
6615 ring_grps, cp_rings, stats,
6616 vnics);
8f23d638
MC
6617
6618 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 6619 cp_rings, stats, vnics);
8f23d638
MC
6620}
6621
74706afa
MC
6622static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6623{
74706afa 6624 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
bbf33d1d
EP
6625 struct hwrm_ring_aggint_qcaps_output *resp;
6626 struct hwrm_ring_aggint_qcaps_input *req;
74706afa
MC
6627 int rc;
6628
6629 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6630 coal_cap->num_cmpl_dma_aggr_max = 63;
6631 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6632 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6633 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6634 coal_cap->int_lat_tmr_min_max = 65535;
6635 coal_cap->int_lat_tmr_max_max = 65535;
6636 coal_cap->num_cmpl_aggr_int_max = 65535;
6637 coal_cap->timer_units = 80;
6638
6639 if (bp->hwrm_spec_code < 0x10902)
6640 return;
6641
bbf33d1d
EP
6642 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6643 return;
6644
6645 resp = hwrm_req_hold(bp, req);
6646 rc = hwrm_req_send_silent(bp, req);
74706afa
MC
6647 if (!rc) {
6648 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 6649 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
6650 coal_cap->num_cmpl_dma_aggr_max =
6651 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6652 coal_cap->num_cmpl_dma_aggr_during_int_max =
6653 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6654 coal_cap->cmpl_aggr_dma_tmr_max =
6655 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6656 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6657 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6658 coal_cap->int_lat_tmr_min_max =
6659 le16_to_cpu(resp->int_lat_tmr_min_max);
6660 coal_cap->int_lat_tmr_max_max =
6661 le16_to_cpu(resp->int_lat_tmr_max_max);
6662 coal_cap->num_cmpl_aggr_int_max =
6663 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6664 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6665 }
bbf33d1d 6666 hwrm_req_drop(bp, req);
74706afa
MC
6667}
6668
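/* Convert a coalescing time in microseconds to device timer ticks.  For
 * example, with the default timer_units of 80 (set above, and possibly
 * overridden by HWRM_RING_AGGINT_QCAPS), 25 usec becomes
 * 25 * 1000 / 80 = 312 ticks.
 */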
6669static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6670{
6671 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6672
6673 return usec * 1000 / coal_cap->timer_units;
6674}
6675
6676static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6677 struct bnxt_coal *hw_coal,
bb053f52
MC
6678 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6679{
74706afa 6680 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
df78ea22 6681 u16 val, tmr, max, flags = hw_coal->flags;
74706afa 6682 u32 cmpl_params = coal_cap->cmpl_params;
f8503969
MC
6683
6684 max = hw_coal->bufs_per_record * 128;
6685 if (hw_coal->budget)
6686 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 6687 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
6688
6689 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6690 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 6691
74706afa 6692 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
6693 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6694
74706afa
MC
6695 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6696 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
6697 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6698
74706afa
MC
6699 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6700 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6701 req->int_lat_tmr_max = cpu_to_le16(tmr);
6702
6703 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6704 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6705 val = tmr / 2;
6706 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6707 req->int_lat_tmr_min = cpu_to_le16(val);
6708 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6709 }
f8503969
MC
6710
6711 /* buf timer set to 1/4 of interrupt timer */
74706afa 6712 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6713 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6714
74706afa
MC
6715 if (cmpl_params &
6716 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6717 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6718 val = clamp_t(u16, tmr, 1,
6719 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6adc4601 6720 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
74706afa
MC
6721 req->enables |=
6722 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6723 }
f8503969 6724
74706afa
MC
6725 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6726 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6727 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6728 req->flags = cpu_to_le16(flags);
74706afa 6729 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6730}
6731
58590c8d
MC
6732static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6733 struct bnxt_coal *hw_coal)
6734{
bbf33d1d 6735 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
58590c8d
MC
6736 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6737 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6738 u32 nq_params = coal_cap->nq_params;
6739 u16 tmr;
bbf33d1d 6740 int rc;
58590c8d
MC
6741
6742 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6743 return 0;
6744
bbf33d1d
EP
6745 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6746 if (rc)
6747 return rc;
6748
6749 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6750 req->flags =
58590c8d
MC
6751 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6752
6753 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6754 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
bbf33d1d
EP
6755 req->int_lat_tmr_min = cpu_to_le16(tmr);
6756 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6757 return hwrm_req_send(bp, req);
58590c8d
MC
6758}
6759
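/* __bnxt_hwrm_set_coal_nq() above programs only the minimum interrupt
 * latency timer of a notification queue (NQ): when the firmware
 * advertises that parameter, the request is sent with the IS_NQ flag
 * and a timer of half the configured coal_ticks value.
 */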
6a8788f2
AG
6760int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6761{
bbf33d1d 6762 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6a8788f2
AG
6763 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6764 struct bnxt_coal coal;
bbf33d1d 6765 int rc;
6a8788f2
AG
6766
 6767 /* Tick values are in microseconds.

6768 * 1 coal_buf x bufs_per_record = 1 completion record.
6769 */
6770 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6771
6772 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6773 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6774
6775 if (!bnapi->rx_ring)
6776 return -ENODEV;
6777
bbf33d1d
EP
6778 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6779 if (rc)
6780 return rc;
6a8788f2 6781
bbf33d1d 6782 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6a8788f2 6783
bbf33d1d 6784 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2 6785
bbf33d1d 6786 return hwrm_req_send(bp, req_rx);
6a8788f2
AG
6787}
6788
c0c050c5
MC
6789int bnxt_hwrm_set_coal(struct bnxt *bp)
6790{
bbf33d1d
EP
6791 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6792 *req;
6793 int i, rc;
6794
6795 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6796 if (rc)
6797 return rc;
c0c050c5 6798
bbf33d1d
EP
6799 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6800 if (rc) {
6801 hwrm_req_drop(bp, req_rx);
6802 return rc;
6803 }
c0c050c5 6804
bbf33d1d
EP
6805 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6806 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
c0c050c5 6807
bbf33d1d
EP
6808 hwrm_req_hold(bp, req_rx);
6809 hwrm_req_hold(bp, req_tx);
c0c050c5 6810 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6811 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6812 struct bnxt_coal *hw_coal;
2c61d211 6813 u16 ring_id;
c0c050c5 6814
bbf33d1d 6815 req = req_rx;
2c61d211
MC
6816 if (!bnapi->rx_ring) {
6817 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
bbf33d1d 6818 req = req_tx;
2c61d211
MC
6819 } else {
6820 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6821 }
6822 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a 6823
bbf33d1d 6824 rc = hwrm_req_send(bp, req);
c0c050c5
MC
6825 if (rc)
6826 break;
58590c8d
MC
6827
6828 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6829 continue;
6830
6831 if (bnapi->rx_ring && bnapi->tx_ring) {
bbf33d1d 6832 req = req_tx;
58590c8d
MC
6833 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6834 req->ring_id = cpu_to_le16(ring_id);
bbf33d1d 6835 rc = hwrm_req_send(bp, req);
58590c8d
MC
6836 if (rc)
6837 break;
6838 }
6839 if (bnapi->rx_ring)
6840 hw_coal = &bp->rx_coal;
6841 else
6842 hw_coal = &bp->tx_coal;
6843 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5 6844 }
bbf33d1d
EP
6845 hwrm_req_drop(bp, req_rx);
6846 hwrm_req_drop(bp, req_tx);
c0c050c5
MC
6847 return rc;
6848}
6849
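/* bnxt_hwrm_set_coal() above walks every completion ring: rings with
 * an RX ring get bp->rx_coal, TX-only rings get bp->tx_coal.  On P5
 * chips a combined NAPI additionally programs its separate TX
 * completion ring, and the associated NQ is then updated through
 * __bnxt_hwrm_set_coal_nq().
 */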
3d061591 6850static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
c0c050c5 6851{
bbf33d1d
EP
6852 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6853 struct hwrm_stat_ctx_free_input *req;
3d061591 6854 int i;
c0c050c5
MC
6855
6856 if (!bp->bnapi)
3d061591 6857 return;
c0c050c5 6858
3e8060fa 6859 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3d061591 6860 return;
3e8060fa 6861
bbf33d1d
EP
6862 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6863 return;
6864 if (BNXT_FW_MAJ(bp) <= 20) {
6865 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6866 hwrm_req_drop(bp, req);
6867 return;
6868 }
6869 hwrm_req_hold(bp, req0);
6870 }
6871 hwrm_req_hold(bp, req);
c0c050c5
MC
6872 for (i = 0; i < bp->cp_nr_rings; i++) {
6873 struct bnxt_napi *bnapi = bp->bnapi[i];
6874 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6875
6876 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
bbf33d1d
EP
6877 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6878 if (req0) {
6879 req0->stat_ctx_id = req->stat_ctx_id;
6880 hwrm_req_send(bp, req0);
c2dec363 6881 }
bbf33d1d 6882 hwrm_req_send(bp, req);
c0c050c5
MC
6883
6884 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6885 }
6886 }
bbf33d1d
EP
6887 hwrm_req_drop(bp, req);
6888 if (req0)
6889 hwrm_req_drop(bp, req0);
c0c050c5
MC
6890}
6891
6892static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6893{
bbf33d1d
EP
6894 struct hwrm_stat_ctx_alloc_output *resp;
6895 struct hwrm_stat_ctx_alloc_input *req;
6896 int rc, i;
c0c050c5 6897
3e8060fa
PS
6898 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6899 return 0;
6900
bbf33d1d
EP
6901 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6902 if (rc)
6903 return rc;
c0c050c5 6904
bbf33d1d
EP
6905 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6906 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5 6907
bbf33d1d 6908 resp = hwrm_req_hold(bp, req);
c0c050c5
MC
6909 for (i = 0; i < bp->cp_nr_rings; i++) {
6910 struct bnxt_napi *bnapi = bp->bnapi[i];
6911 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6912
bbf33d1d 6913 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
c0c050c5 6914
bbf33d1d 6915 rc = hwrm_req_send(bp, req);
c0c050c5
MC
6916 if (rc)
6917 break;
6918
6919 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6920
6921 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6922 }
bbf33d1d 6923 hwrm_req_drop(bp, req);
89aa8445 6924 return rc;
c0c050c5
MC
6925}
6926
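/* bnxt_hwrm_stat_ctx_alloc() above allocates one firmware statistics
 * context per completion ring, passing the DMA address of the ring's
 * hardware stats block and an update period derived from
 * bp->stats_coal_ticks.  The returned context IDs are saved in the
 * ring and in the ring-group info; Nitro A0 chips are skipped.
 */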
cf6645f8
MC
6927static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6928{
bbf33d1d
EP
6929 struct hwrm_func_qcfg_output *resp;
6930 struct hwrm_func_qcfg_input *req;
8ae24738 6931 u32 min_db_offset = 0;
9315edca 6932 u16 flags;
cf6645f8
MC
6933 int rc;
6934
bbf33d1d
EP
6935 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6936 if (rc)
6937 return rc;
6938
6939 req->fid = cpu_to_le16(0xffff);
6940 resp = hwrm_req_hold(bp, req);
6941 rc = hwrm_req_send(bp, req);
cf6645f8
MC
6942 if (rc)
6943 goto func_qcfg_exit;
6944
6945#ifdef CONFIG_BNXT_SRIOV
6946 if (BNXT_VF(bp)) {
cf6645f8
MC
6947 struct bnxt_vf_info *vf = &bp->vf;
6948
6949 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
230d1f0d
MC
6950 } else {
6951 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
cf6645f8
MC
6952 }
6953#endif
9315edca
MC
6954 flags = le16_to_cpu(resp->flags);
6955 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6956 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6957 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6958 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6959 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca 6960 }
131db499 6961 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
9315edca 6962 bp->flags |= BNXT_FLAG_MULTI_HOST;
131db499 6963
8d4bd96b
MC
6964 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6965 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
bc39f885 6966
567b2abe
SB
6967 switch (resp->port_partition_type) {
6968 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6969 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6970 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6971 bp->port_partition_type = resp->port_partition_type;
6972 break;
6973 }
32e8239c
MC
6974 if (bp->hwrm_spec_code < 0x10707 ||
6975 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6976 bp->br_mode = BRIDGE_MODE_VEB;
6977 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6978 bp->br_mode = BRIDGE_MODE_VEPA;
6979 else
6980 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 6981
7eb9bb3a
MC
6982 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6983 if (!bp->max_mtu)
6984 bp->max_mtu = BNXT_MAX_MTU;
6985
8ae24738
MC
6986 if (bp->db_size)
6987 goto func_qcfg_exit;
6988
6989 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6990 if (BNXT_PF(bp))
6991 min_db_offset = DB_PF_OFFSET_P5;
6992 else
6993 min_db_offset = DB_VF_OFFSET_P5;
6994 }
6995 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6996 1024);
6997 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6998 bp->db_size <= min_db_offset)
6999 bp->db_size = pci_resource_len(bp->pdev, 2);
7000
cf6645f8 7001func_qcfg_exit:
bbf33d1d 7002 hwrm_req_drop(bp, req);
cf6645f8
MC
7003 return rc;
7004}
7005
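/* bnxt_hwrm_func_qcfg() above caches per-function configuration from
 * firmware: the VF VLAN or the PF's registered-VF count, LLDP/DCBX
 * agent and ring-monitor capabilities, multi-host and NPAR partition
 * state, the bridge (EVB) mode, the configured maximum MTU, and the
 * L2 doorbell BAR size used to set bp->db_size (bounded by the PCI
 * BAR 2 length and the P5 minimum doorbell offset).
 */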
e9696ff3
MC
7006static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
7007 struct hwrm_func_backing_store_qcaps_output *resp)
7008{
7009 struct bnxt_mem_init *mem_init;
41435c39 7010 u16 init_mask;
e9696ff3 7011 u8 init_val;
41435c39 7012 u8 *offset;
e9696ff3
MC
7013 int i;
7014
7015 init_val = resp->ctx_kind_initializer;
41435c39
MC
7016 init_mask = le16_to_cpu(resp->ctx_init_mask);
7017 offset = &resp->qp_init_offset;
7018 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7019 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
e9696ff3 7020 mem_init->init_val = init_val;
41435c39
MC
7021 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
7022 if (!init_mask)
7023 continue;
7024 if (i == BNXT_CTX_MEM_INIT_STAT)
7025 offset = &resp->stat_init_offset;
7026 if (init_mask & (1 << i))
7027 mem_init->offset = *offset * 4;
7028 else
7029 mem_init->init_val = 0;
7030 }
7031 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
7032 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
7033 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
7034 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
7035 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
7036 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
e9696ff3
MC
7037}
7038
98f04cf0
MC
7039static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7040{
bbf33d1d
EP
7041 struct hwrm_func_backing_store_qcaps_output *resp;
7042 struct hwrm_func_backing_store_qcaps_input *req;
98f04cf0
MC
7043 int rc;
7044
7045 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7046 return 0;
7047
bbf33d1d
EP
7048 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
7049 if (rc)
7050 return rc;
7051
7052 resp = hwrm_req_hold(bp, req);
7053 rc = hwrm_req_send_silent(bp, req);
98f04cf0
MC
7054 if (!rc) {
7055 struct bnxt_ctx_pg_info *ctx_pg;
7056 struct bnxt_ctx_mem_info *ctx;
ac3158cb 7057 int i, tqm_rings;
98f04cf0
MC
7058
7059 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7060 if (!ctx) {
7061 rc = -ENOMEM;
7062 goto ctx_err;
7063 }
98f04cf0
MC
7064 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
7065 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
7066 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7067 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
7068 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
7069 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
7070 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
7071 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
7072 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
7073 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
7074 ctx->vnic_max_vnic_entries =
7075 le16_to_cpu(resp->vnic_max_vnic_entries);
7076 ctx->vnic_max_ring_table_entries =
7077 le16_to_cpu(resp->vnic_max_ring_table_entries);
7078 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
7079 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
7080 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
7081 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
7082 ctx->tqm_min_entries_per_ring =
7083 le32_to_cpu(resp->tqm_min_entries_per_ring);
7084 ctx->tqm_max_entries_per_ring =
7085 le32_to_cpu(resp->tqm_max_entries_per_ring);
7086 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
7087 if (!ctx->tqm_entries_multiple)
7088 ctx->tqm_entries_multiple = 1;
7089 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
7090 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
7091 ctx->mrav_num_entries_units =
7092 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
7093 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
7094 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
e9696ff3
MC
7095
7096 bnxt_init_ctx_initializer(ctx, resp);
7097
ac3158cb
MC
7098 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7099 if (!ctx->tqm_fp_rings_count)
7100 ctx->tqm_fp_rings_count = bp->max_q;
a029a2fe
MC
7101 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7102 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
ac3158cb 7103
a029a2fe 7104 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ac3158cb
MC
7105 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
7106 if (!ctx_pg) {
7107 kfree(ctx);
7108 rc = -ENOMEM;
7109 goto ctx_err;
7110 }
7111 for (i = 0; i < tqm_rings; i++, ctx_pg++)
7112 ctx->tqm_mem[i] = ctx_pg;
7113 bp->ctx = ctx;
98f04cf0
MC
7114 } else {
7115 rc = 0;
7116 }
7117ctx_err:
bbf33d1d 7118 hwrm_req_drop(bp, req);
98f04cf0
MC
7119 return rc;
7120}
7121
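/* bnxt_hwrm_func_backing_store_qcaps() above queries how much
 * host-backed context memory the firmware needs (QP, SRQ, CQ, VNIC,
 * stats, TQM, MRAV and TIM entry counts and sizes), allocates the
 * bnxt_ctx_mem_info structure holding those limits, and sizes the
 * array of TQM ring contexts (fast-path rings plus slow-path rings).
 * It is a no-op for VFs, for older firmware, or when bp->ctx already
 * exists.
 */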
1b9394e5
MC
7122static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7123 __le64 *pg_dir)
7124{
be6d755f
EP
7125 if (!rmem->nr_pages)
7126 return;
7127
702279d2 7128 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
08fe9d18
MC
7129 if (rmem->depth >= 1) {
7130 if (rmem->depth == 2)
7131 *pg_attr |= 2;
7132 else
7133 *pg_attr |= 1;
1b9394e5
MC
7134 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7135 } else {
7136 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7137 }
7138}
7139
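/* bnxt_hwrm_set_pg_attr() above encodes one context memory ring into
 * the page-size/page-level attribute byte and the page directory
 * address: depth 0 points directly at the first data page, while
 * depth 1 or 2 (indirect page tables) set the level bits and point at
 * the page table instead.
 */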
7140#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
7141 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
7142 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
7143 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
7144 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
7145 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7146
7147static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7148{
bbf33d1d 7149 struct hwrm_func_backing_store_cfg_input *req;
1b9394e5
MC
7150 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7151 struct bnxt_ctx_pg_info *ctx_pg;
bbf33d1d
EP
7152 void **__req = (void **)&req;
7153 u32 req_len = sizeof(*req);
1b9394e5
MC
7154 __le32 *num_entries;
7155 __le64 *pg_dir;
53579e37 7156 u32 flags = 0;
1b9394e5 7157 u8 *pg_attr;
1b9394e5 7158 u32 ena;
bbf33d1d 7159 int rc;
9f90445c 7160 int i;
1b9394e5
MC
7161
7162 if (!ctx)
7163 return 0;
7164
16db6323
MC
7165 if (req_len > bp->hwrm_max_ext_req_len)
7166 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bbf33d1d
EP
7167 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7168 if (rc)
7169 return rc;
1b9394e5 7170
bbf33d1d 7171 req->enables = cpu_to_le32(enables);
1b9394e5
MC
7172 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7173 ctx_pg = &ctx->qp_mem;
bbf33d1d
EP
7174 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7175 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7176 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7177 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
1b9394e5 7178 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7179 &req->qpc_pg_size_qpc_lvl,
7180 &req->qpc_page_dir);
1b9394e5
MC
7181 }
7182 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7183 ctx_pg = &ctx->srq_mem;
bbf33d1d
EP
7184 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7185 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7186 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
1b9394e5 7187 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7188 &req->srq_pg_size_srq_lvl,
7189 &req->srq_page_dir);
1b9394e5
MC
7190 }
7191 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7192 ctx_pg = &ctx->cq_mem;
bbf33d1d
EP
7193 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7194 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7195 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7196 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7197 &req->cq_pg_size_cq_lvl,
7198 &req->cq_page_dir);
1b9394e5
MC
7199 }
7200 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7201 ctx_pg = &ctx->vnic_mem;
bbf33d1d 7202 req->vnic_num_vnic_entries =
1b9394e5 7203 cpu_to_le16(ctx->vnic_max_vnic_entries);
bbf33d1d 7204 req->vnic_num_ring_table_entries =
1b9394e5 7205 cpu_to_le16(ctx->vnic_max_ring_table_entries);
bbf33d1d 7206 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
1b9394e5 7207 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7208 &req->vnic_pg_size_vnic_lvl,
7209 &req->vnic_page_dir);
1b9394e5
MC
7210 }
7211 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7212 ctx_pg = &ctx->stat_mem;
bbf33d1d
EP
7213 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7214 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
1b9394e5 7215 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7216 &req->stat_pg_size_stat_lvl,
7217 &req->stat_page_dir);
1b9394e5 7218 }
cf6daed0
MC
7219 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7220 ctx_pg = &ctx->mrav_mem;
bbf33d1d 7221 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
7222 if (ctx->mrav_num_entries_units)
7223 flags |=
7224 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
bbf33d1d 7225 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
cf6daed0 7226 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7227 &req->mrav_pg_size_mrav_lvl,
7228 &req->mrav_page_dir);
cf6daed0
MC
7229 }
7230 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7231 ctx_pg = &ctx->tim_mem;
bbf33d1d
EP
7232 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7233 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
cf6daed0 7234 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7235 &req->tim_pg_size_tim_lvl,
7236 &req->tim_page_dir);
cf6daed0 7237 }
bbf33d1d
EP
7238 for (i = 0, num_entries = &req->tqm_sp_num_entries,
7239 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7240 pg_dir = &req->tqm_sp_page_dir,
1b9394e5 7241 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
a029a2fe
MC
7242 i < BNXT_MAX_TQM_RINGS;
7243 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
1b9394e5
MC
7244 if (!(enables & ena))
7245 continue;
7246
bbf33d1d 7247 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
1b9394e5
MC
7248 ctx_pg = ctx->tqm_mem[i];
7249 *num_entries = cpu_to_le32(ctx_pg->entries);
7250 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7251 }
bbf33d1d
EP
7252 req->flags = cpu_to_le32(flags);
7253 return hwrm_req_send(bp, req);
1b9394e5
MC
7254}
7255
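/* bnxt_hwrm_func_backing_store_cfg() above reports the allocated
 * context memory back to firmware.  Each region selected by the
 * 'enables' bitmask (QP, SRQ, CQ, VNIC, stats, MRAV, TIM and the TQM
 * rings) gets its entry count, entry size, page attributes and page
 * directory filled in through bnxt_hwrm_set_pg_attr(); the request is
 * shortened to the legacy length when the firmware cannot accept the
 * extended request size.
 */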
98f04cf0 7256static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 7257 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
7258{
7259 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7260
98f04cf0
MC
7261 rmem->page_size = BNXT_PAGE_SIZE;
7262 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7263 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 7264 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
7265 if (rmem->depth >= 1)
7266 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
7267 return bnxt_alloc_ring(bp, rmem);
7268}
7269
08fe9d18
MC
7270static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7271 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
e9696ff3 7272 u8 depth, struct bnxt_mem_init *mem_init)
08fe9d18
MC
7273{
7274 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7275 int rc;
7276
7277 if (!mem_size)
bbf211b1 7278 return -EINVAL;
08fe9d18
MC
7279
7280 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7281 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7282 ctx_pg->nr_pages = 0;
7283 return -EINVAL;
7284 }
7285 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7286 int nr_tbls, i;
7287
7288 rmem->depth = 2;
7289 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7290 GFP_KERNEL);
7291 if (!ctx_pg->ctx_pg_tbl)
7292 return -ENOMEM;
7293 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7294 rmem->nr_pages = nr_tbls;
7295 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7296 if (rc)
7297 return rc;
7298 for (i = 0; i < nr_tbls; i++) {
7299 struct bnxt_ctx_pg_info *pg_tbl;
7300
7301 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7302 if (!pg_tbl)
7303 return -ENOMEM;
7304 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7305 rmem = &pg_tbl->ring_mem;
7306 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7307 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7308 rmem->depth = 1;
7309 rmem->nr_pages = MAX_CTX_PAGES;
e9696ff3 7310 rmem->mem_init = mem_init;
6ef982de
MC
7311 if (i == (nr_tbls - 1)) {
7312 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7313
7314 if (rem)
7315 rmem->nr_pages = rem;
7316 }
08fe9d18
MC
7317 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7318 if (rc)
7319 break;
7320 }
7321 } else {
7322 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7323 if (rmem->nr_pages > 1 || depth)
7324 rmem->depth = 1;
e9696ff3 7325 rmem->mem_init = mem_init;
08fe9d18
MC
7326 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7327 }
7328 return rc;
7329}
7330
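/* bnxt_alloc_ctx_pg_tbls() above allocates the backing pages for one
 * context region.  Regions that fit within MAX_CTX_PAGES use a single
 * level; larger regions (or callers requesting depth > 1) build a
 * two-level structure: an array of page-table blocks, each covering
 * up to MAX_CTX_PAGES data pages, with the last block trimmed to the
 * remainder.
 */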
7331static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7332 struct bnxt_ctx_pg_info *ctx_pg)
7333{
7334 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7335
7336 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7337 ctx_pg->ctx_pg_tbl) {
7338 int i, nr_tbls = rmem->nr_pages;
7339
7340 for (i = 0; i < nr_tbls; i++) {
7341 struct bnxt_ctx_pg_info *pg_tbl;
7342 struct bnxt_ring_mem_info *rmem2;
7343
7344 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7345 if (!pg_tbl)
7346 continue;
7347 rmem2 = &pg_tbl->ring_mem;
7348 bnxt_free_ring(bp, rmem2);
7349 ctx_pg->ctx_pg_arr[i] = NULL;
7350 kfree(pg_tbl);
7351 ctx_pg->ctx_pg_tbl[i] = NULL;
7352 }
7353 kfree(ctx_pg->ctx_pg_tbl);
7354 ctx_pg->ctx_pg_tbl = NULL;
7355 }
7356 bnxt_free_ring(bp, rmem);
7357 ctx_pg->nr_pages = 0;
7358}
7359
228ea8c1 7360void bnxt_free_ctx_mem(struct bnxt *bp)
98f04cf0
MC
7361{
7362 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7363 int i;
7364
7365 if (!ctx)
7366 return;
7367
7368 if (ctx->tqm_mem[0]) {
ac3158cb 7369 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
08fe9d18 7370 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
7371 kfree(ctx->tqm_mem[0]);
7372 ctx->tqm_mem[0] = NULL;
7373 }
7374
cf6daed0
MC
7375 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7376 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
7377 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7378 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7379 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7380 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7381 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
7382 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7383}
7384
7385static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7386{
7387 struct bnxt_ctx_pg_info *ctx_pg;
7388 struct bnxt_ctx_mem_info *ctx;
e9696ff3 7389 struct bnxt_mem_init *init;
1b9394e5 7390 u32 mem_size, ena, entries;
c7dd7ab4 7391 u32 entries_sp, min;
53579e37 7392 u32 num_mr, num_ah;
cf6daed0
MC
7393 u32 extra_srqs = 0;
7394 u32 extra_qps = 0;
7395 u8 pg_lvl = 1;
98f04cf0
MC
7396 int i, rc;
7397
7398 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7399 if (rc) {
7400 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7401 rc);
7402 return rc;
7403 }
7404 ctx = bp->ctx;
7405 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7406 return 0;
7407
d629522e 7408 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
7409 pg_lvl = 2;
7410 extra_qps = 65536;
7411 extra_srqs = 8192;
7412 }
7413
98f04cf0 7414 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
7415 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7416 extra_qps;
be6d755f
EP
7417 if (ctx->qp_entry_size) {
7418 mem_size = ctx->qp_entry_size * ctx_pg->entries;
e9696ff3
MC
7419 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7420 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7421 if (rc)
7422 return rc;
7423 }
98f04cf0
MC
7424
7425 ctx_pg = &ctx->srq_mem;
cf6daed0 7426 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
be6d755f
EP
7427 if (ctx->srq_entry_size) {
7428 mem_size = ctx->srq_entry_size * ctx_pg->entries;
e9696ff3
MC
7429 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7430 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7431 if (rc)
7432 return rc;
7433 }
98f04cf0
MC
7434
7435 ctx_pg = &ctx->cq_mem;
cf6daed0 7436 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
be6d755f
EP
7437 if (ctx->cq_entry_size) {
7438 mem_size = ctx->cq_entry_size * ctx_pg->entries;
e9696ff3
MC
7439 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7440 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7441 if (rc)
7442 return rc;
7443 }
98f04cf0
MC
7444
7445 ctx_pg = &ctx->vnic_mem;
7446 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7447 ctx->vnic_max_ring_table_entries;
be6d755f
EP
7448 if (ctx->vnic_entry_size) {
7449 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
e9696ff3
MC
7450 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7451 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7452 if (rc)
7453 return rc;
7454 }
98f04cf0
MC
7455
7456 ctx_pg = &ctx->stat_mem;
7457 ctx_pg->entries = ctx->stat_max_entries;
be6d755f
EP
7458 if (ctx->stat_entry_size) {
7459 mem_size = ctx->stat_entry_size * ctx_pg->entries;
e9696ff3
MC
7460 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7461 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7462 if (rc)
7463 return rc;
7464 }
98f04cf0 7465
cf6daed0
MC
7466 ena = 0;
7467 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7468 goto skip_rdma;
7469
7470 ctx_pg = &ctx->mrav_mem;
53579e37
DS
7471 /* 128K extra is needed to accommodate static AH context
7472 * allocation by f/w.
7473 */
7474 num_mr = 1024 * 256;
7475 num_ah = 1024 * 128;
7476 ctx_pg->entries = num_mr + num_ah;
be6d755f
EP
7477 if (ctx->mrav_entry_size) {
7478 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
e9696ff3
MC
7479 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7480 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
be6d755f
EP
7481 if (rc)
7482 return rc;
7483 }
cf6daed0 7484 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
7485 if (ctx->mrav_num_entries_units)
7486 ctx_pg->entries =
7487 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7488 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
7489
7490 ctx_pg = &ctx->tim_mem;
7491 ctx_pg->entries = ctx->qp_mem.entries;
be6d755f
EP
7492 if (ctx->tim_entry_size) {
7493 mem_size = ctx->tim_entry_size * ctx_pg->entries;
e9696ff3 7494 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
be6d755f
EP
7495 if (rc)
7496 return rc;
7497 }
cf6daed0
MC
7498 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7499
7500skip_rdma:
c7dd7ab4
MC
7501 min = ctx->tqm_min_entries_per_ring;
7502 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7503 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7504 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
c12e1643 7505 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
98f04cf0 7506 entries = roundup(entries, ctx->tqm_entries_multiple);
c7dd7ab4 7507 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
ac3158cb 7508 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
98f04cf0 7509 ctx_pg = ctx->tqm_mem[i];
c7dd7ab4 7510 ctx_pg->entries = i ? entries : entries_sp;
be6d755f
EP
7511 if (ctx->tqm_entry_size) {
7512 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7513 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
e9696ff3 7514 NULL);
be6d755f
EP
7515 if (rc)
7516 return rc;
7517 }
1b9394e5 7518 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 7519 }
1b9394e5
MC
7520 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7521 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
0b5b561c 7522 if (rc) {
1b9394e5
MC
7523 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7524 rc);
0b5b561c
MC
7525 return rc;
7526 }
7527 ctx->flags |= BNXT_CTX_FLAG_INITED;
98f04cf0
MC
7528 return 0;
7529}
7530
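/* bnxt_alloc_ctx_mem() above sizes and allocates every context region
 * from the qcaps limits: the L2 entry counts, extra QP/SRQ/CQ entries
 * plus the MRAV and TIM regions when RoCE is supported, and the TQM
 * slow-path and fast-path rings.  The layout is then pushed to
 * firmware with bnxt_hwrm_func_backing_store_cfg() and
 * BNXT_CTX_FLAG_INITED is set on success.
 */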
db4723b3 7531int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4 7532{
bbf33d1d
EP
7533 struct hwrm_func_resource_qcaps_output *resp;
7534 struct hwrm_func_resource_qcaps_input *req;
be0dd9c4
MC
7535 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7536 int rc;
7537
bbf33d1d
EP
7538 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7539 if (rc)
7540 return rc;
be0dd9c4 7541
bbf33d1d
EP
7542 req->fid = cpu_to_le16(0xffff);
7543 resp = hwrm_req_hold(bp, req);
7544 rc = hwrm_req_send_silent(bp, req);
d4f1420d 7545 if (rc)
be0dd9c4 7546 goto hwrm_func_resc_qcaps_exit;
be0dd9c4 7547
db4723b3
MC
7548 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7549 if (!all)
7550 goto hwrm_func_resc_qcaps_exit;
7551
be0dd9c4
MC
7552 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7553 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7554 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7555 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7556 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7557 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7558 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7559 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7560 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7561 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7562 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7563 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7564 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7565 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7566 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7567 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7568
9c1fabdf
MC
7569 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7570 u16 max_msix = le16_to_cpu(resp->max_msix);
7571
f7588cd8 7572 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
7573 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7574 }
7575
4673d664
MC
7576 if (BNXT_PF(bp)) {
7577 struct bnxt_pf_info *pf = &bp->pf;
7578
7579 pf->vf_resv_strategy =
7580 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 7581 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
7582 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7583 }
be0dd9c4 7584hwrm_func_resc_qcaps_exit:
bbf33d1d 7585 hwrm_req_drop(bp, req);
be0dd9c4
MC
7586 return rc;
7587}
7588
ae5c42f0
MC
7589static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7590{
bbf33d1d
EP
7591 struct hwrm_port_mac_ptp_qcfg_output *resp;
7592 struct hwrm_port_mac_ptp_qcfg_input *req;
ae5c42f0 7593 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
24ac1ecd 7594 bool phc_cfg;
ae5c42f0
MC
7595 u8 flags;
7596 int rc;
7597
e8b51a1a 7598 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
ae5c42f0
MC
7599 rc = -ENODEV;
7600 goto no_ptp;
7601 }
7602
bbf33d1d 7603 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
ae5c42f0
MC
7604 if (rc)
7605 goto no_ptp;
7606
bbf33d1d
EP
7607 req->port_id = cpu_to_le16(bp->pf.port_id);
7608 resp = hwrm_req_hold(bp, req);
7609 rc = hwrm_req_send(bp, req);
7610 if (rc)
7611 goto exit;
7612
ae5c42f0
MC
7613 flags = resp->flags;
7614 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7615 rc = -ENODEV;
bbf33d1d 7616 goto exit;
ae5c42f0
MC
7617 }
7618 if (!ptp) {
7619 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
bbf33d1d
EP
7620 if (!ptp) {
7621 rc = -ENOMEM;
7622 goto exit;
7623 }
ae5c42f0
MC
7624 ptp->bp = bp;
7625 bp->ptp_cfg = ptp;
7626 }
7627 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7628 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7629 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7630 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7631 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7632 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7633 } else {
7634 rc = -ENODEV;
bbf33d1d 7635 goto exit;
ae5c42f0 7636 }
24ac1ecd
PC
7637 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
7638 rc = bnxt_ptp_init(bp, phc_cfg);
bbf33d1d
EP
7639 if (rc)
7640 netdev_warn(bp->dev, "PTP initialization failed.\n");
7641exit:
7642 hwrm_req_drop(bp, req);
a521c8a0
MC
7643 if (!rc)
7644 return 0;
7645
ae5c42f0 7646no_ptp:
a521c8a0 7647 bnxt_ptp_clear(bp);
ae5c42f0
MC
7648 kfree(ptp);
7649 bp->ptp_cfg = NULL;
7650 return rc;
7651}
7652
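/* __bnxt_hwrm_ptp_qcfg() above probes PTP support on P5/Thor chips:
 * it requires HWRM access to the PHC, records the direct-access
 * reference clock registers (or the fixed TIMESYNC_TS0 registers on
 * P5), notes whether the real-time clock was already configured by
 * firmware, and calls bnxt_ptp_init().  On failure the ptp_cfg
 * structure is freed and bp->ptp_cfg is cleared.
 */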
be0dd9c4 7653static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5 7654{
bbf33d1d
EP
7655 struct hwrm_func_qcaps_output *resp;
7656 struct hwrm_func_qcaps_input *req;
6a4f2947 7657 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
66ed81dc 7658 u32 flags, flags_ext, flags_ext2;
bbf33d1d 7659 int rc;
c0c050c5 7660
bbf33d1d
EP
7661 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7662 if (rc)
7663 return rc;
c0c050c5 7664
bbf33d1d
EP
7665 req->fid = cpu_to_le16(0xffff);
7666 resp = hwrm_req_hold(bp, req);
7667 rc = hwrm_req_send(bp, req);
c0c050c5
MC
7668 if (rc)
7669 goto hwrm_func_qcaps_exit;
7670
6a4f2947
MC
7671 flags = le32_to_cpu(resp->flags);
7672 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 7673 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 7674 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 7675 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
7676 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7677 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
0a3f4e4f
VV
7678 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7679 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6154532f
VV
7680 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7681 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
07f83d72
MC
7682 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7683 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
4037eb71
VV
7684 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7685 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1da63ddd
EP
7686 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7687 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
80194db9
VV
7688 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
7689 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
1da63ddd
EP
7690
7691 flags_ext = le32_to_cpu(resp->flags_ext);
7692 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7693 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
caf3eedb
PC
7694 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7695 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
24ac1ecd
PC
7696 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
7697 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
892a662f
EP
7698 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7699 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
3c415339
EP
7700 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7701 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
e4060d30 7702
66ed81dc
PC
7703 flags_ext2 = le32_to_cpu(resp->flags_ext2);
7704 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
7705 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
7706
7cc5a20e 7707 bp->tx_push_thresh = 0;
fed7edd1
MC
7708 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7709 BNXT_FW_MAJ(bp) > 217)
7cc5a20e
MC
7710 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7711
6a4f2947
MC
7712 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7713 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7714 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7715 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7716 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7717 if (!hw_resc->max_hw_ring_grps)
7718 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7719 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7720 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7721 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7722
c0c050c5
MC
7723 if (BNXT_PF(bp)) {
7724 struct bnxt_pf_info *pf = &bp->pf;
7725
7726 pf->fw_fid = le16_to_cpu(resp->fid);
7727 pf->port_id = le16_to_cpu(resp->port_id);
11f15ed3 7728 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
7729 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7730 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7731 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7732 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7733 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7734 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7735 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7736 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
ba642ab7 7737 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6a4f2947 7738 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 7739 bp->flags |= BNXT_FLAG_WOL_CAP;
de5bf194 7740 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
edc52873 7741 bp->fw_cap |= BNXT_FW_CAP_PTP;
de5bf194 7742 } else {
a521c8a0 7743 bnxt_ptp_clear(bp);
de5bf194
MC
7744 kfree(bp->ptp_cfg);
7745 bp->ptp_cfg = NULL;
7746 }
c0c050c5 7747 } else {
379a80a1 7748#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
7749 struct bnxt_vf_info *vf = &bp->vf;
7750
7751 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 7752 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 7753#endif
c0c050c5
MC
7754 }
7755
c0c050c5 7756hwrm_func_qcaps_exit:
bbf33d1d 7757 hwrm_req_drop(bp, req);
c0c050c5
MC
7758 return rc;
7759}
7760
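/* __bnxt_hwrm_func_qcaps() above translates the FUNC_QCAPS response
 * into driver state: RoCE, WoL, PTP, error-recovery, hot-reset and
 * other capability bits in bp->flags and bp->fw_cap, the maximum
 * ring, VNIC and stat-context resources in bp->hw_resc, and PF- or
 * VF-specific data such as the permanent MAC address, port ID and VF
 * limits.
 */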
80194db9
VV
7761static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7762{
7763 struct hwrm_dbg_qcaps_output *resp;
7764 struct hwrm_dbg_qcaps_input *req;
7765 int rc;
7766
7767 bp->fw_dbg_cap = 0;
7768 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
7769 return;
7770
7771 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7772 if (rc)
7773 return;
7774
7775 req->fid = cpu_to_le16(0xffff);
7776 resp = hwrm_req_hold(bp, req);
7777 rc = hwrm_req_send(bp, req);
7778 if (rc)
7779 goto hwrm_dbg_qcaps_exit;
7780
7781 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7782
7783hwrm_dbg_qcaps_exit:
7784 hwrm_req_drop(bp, req);
7785}
7786
804fba4e
MC
7787static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7788
c5b744d3 7789int bnxt_hwrm_func_qcaps(struct bnxt *bp)
be0dd9c4
MC
7790{
7791 int rc;
7792
7793 rc = __bnxt_hwrm_func_qcaps(bp);
7794 if (rc)
7795 return rc;
80194db9
VV
7796
7797 bnxt_hwrm_dbg_qcaps(bp);
7798
804fba4e
MC
7799 rc = bnxt_hwrm_queue_qportcfg(bp);
7800 if (rc) {
7801 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7802 return rc;
7803 }
be0dd9c4 7804 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
7805 rc = bnxt_alloc_ctx_mem(bp);
7806 if (rc)
7807 return rc;
db4723b3 7808 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 7809 if (!rc)
97381a18 7810 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
7811 }
7812 return 0;
7813}
7814
e969ae5b
MC
7815static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7816{
e969ae5b 7817 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
bbf33d1d 7818 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
e969ae5b 7819 u32 flags;
bbf33d1d 7820 int rc;
e969ae5b
MC
7821
7822 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7823 return 0;
7824
bbf33d1d
EP
7825 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7826 if (rc)
7827 return rc;
e969ae5b 7828
bbf33d1d
EP
7829 resp = hwrm_req_hold(bp, req);
7830 rc = hwrm_req_send(bp, req);
e969ae5b
MC
7831 if (rc)
7832 goto hwrm_cfa_adv_qcaps_exit;
7833
7834 flags = le32_to_cpu(resp->flags);
7835 if (flags &
41136ab3
MC
7836 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7837 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
e969ae5b
MC
7838
7839hwrm_cfa_adv_qcaps_exit:
bbf33d1d 7840 hwrm_req_drop(bp, req);
e969ae5b
MC
7841 return rc;
7842}
7843
3e9ec2bb
EP
7844static int __bnxt_alloc_fw_health(struct bnxt *bp)
7845{
7846 if (bp->fw_health)
7847 return 0;
7848
7849 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7850 if (!bp->fw_health)
7851 return -ENOMEM;
7852
8cc95ceb 7853 mutex_init(&bp->fw_health->lock);
3e9ec2bb
EP
7854 return 0;
7855}
7856
7857static int bnxt_alloc_fw_health(struct bnxt *bp)
7858{
7859 int rc;
7860
7861 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7862 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7863 return 0;
7864
7865 rc = __bnxt_alloc_fw_health(bp);
7866 if (rc) {
7867 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7868 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7869 return rc;
7870 }
7871
7872 return 0;
7873}
7874
ba02629f
EP
7875static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7876{
7877 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7878 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7879 BNXT_FW_HEALTH_WIN_MAP_OFF);
7880}
7881
43a440c4
MC
7882static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7883{
7884 struct bnxt_fw_health *fw_health = bp->fw_health;
7885 u32 reg_type;
7886
8cc95ceb 7887 if (!fw_health)
43a440c4
MC
7888 return;
7889
7890 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7891 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7892 fw_health->status_reliable = false;
8cc95ceb
EP
7893
7894 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
7895 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7896 fw_health->resets_reliable = false;
43a440c4
MC
7897}
7898
ba02629f
EP
7899static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7900{
7901 void __iomem *hs;
7902 u32 status_loc;
7903 u32 reg_type;
7904 u32 sig;
7905
43a440c4
MC
7906 if (bp->fw_health)
7907 bp->fw_health->status_reliable = false;
7908
ba02629f
EP
7909 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7910 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7911
7912 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7913 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
d1cbd165
MC
7914 if (!bp->chip_num) {
7915 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7916 bp->chip_num = readl(bp->bar0 +
7917 BNXT_FW_HEALTH_WIN_BASE +
7918 BNXT_GRC_REG_CHIP_NUM);
7919 }
43a440c4 7920 if (!BNXT_CHIP_P5(bp))
d1cbd165 7921 return;
43a440c4 7922
d1cbd165
MC
7923 status_loc = BNXT_GRC_REG_STATUS_P5 |
7924 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7925 } else {
7926 status_loc = readl(hs + offsetof(struct hcomm_status,
7927 fw_status_loc));
ba02629f
EP
7928 }
7929
7930 if (__bnxt_alloc_fw_health(bp)) {
7931 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7932 return;
7933 }
7934
ba02629f
EP
7935 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7936 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7937 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7938 __bnxt_map_fw_health_reg(bp, status_loc);
7939 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7940 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7941 }
7942
7943 bp->fw_health->status_reliable = true;
7944}
7945
9ffbd677
MC
7946static int bnxt_map_fw_health_regs(struct bnxt *bp)
7947{
7948 struct bnxt_fw_health *fw_health = bp->fw_health;
7949 u32 reg_base = 0xffffffff;
7950 int i;
7951
43a440c4 7952 bp->fw_health->status_reliable = false;
8cc95ceb 7953 bp->fw_health->resets_reliable = false;
9ffbd677
MC
7954 /* Only pre-map the monitoring GRC registers using window 3 */
7955 for (i = 0; i < 4; i++) {
7956 u32 reg = fw_health->regs[i];
7957
7958 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7959 continue;
7960 if (reg_base == 0xffffffff)
7961 reg_base = reg & BNXT_GRC_BASE_MASK;
7962 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7963 return -ERANGE;
ba02629f 7964 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9ffbd677 7965 }
43a440c4 7966 bp->fw_health->status_reliable = true;
8cc95ceb 7967 bp->fw_health->resets_reliable = true;
9ffbd677
MC
7968 if (reg_base == 0xffffffff)
7969 return 0;
7970
ba02629f 7971 __bnxt_map_fw_health_reg(bp, reg_base);
9ffbd677
MC
7972 return 0;
7973}
7974
0e0e3c53
KA
7975static void bnxt_remap_fw_health_regs(struct bnxt *bp)
7976{
7977 if (!bp->fw_health)
7978 return;
7979
7980 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
7981 bp->fw_health->status_reliable = true;
7982 bp->fw_health->resets_reliable = true;
7983 } else {
7984 bnxt_try_map_fw_health_reg(bp);
7985 }
7986}
7987
07f83d72
MC
7988static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7989{
07f83d72 7990 struct bnxt_fw_health *fw_health = bp->fw_health;
bbf33d1d
EP
7991 struct hwrm_error_recovery_qcfg_output *resp;
7992 struct hwrm_error_recovery_qcfg_input *req;
07f83d72
MC
7993 int rc, i;
7994
7995 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7996 return 0;
7997
bbf33d1d
EP
7998 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7999 if (rc)
8000 return rc;
8001
8002 resp = hwrm_req_hold(bp, req);
8003 rc = hwrm_req_send(bp, req);
07f83d72
MC
8004 if (rc)
8005 goto err_recovery_out;
07f83d72
MC
8006 fw_health->flags = le32_to_cpu(resp->flags);
8007 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
8008 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
8009 rc = -EINVAL;
8010 goto err_recovery_out;
8011 }
8012 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
8013 fw_health->master_func_wait_dsecs =
8014 le32_to_cpu(resp->master_func_wait_period);
8015 fw_health->normal_func_wait_dsecs =
8016 le32_to_cpu(resp->normal_func_wait_period);
8017 fw_health->post_reset_wait_dsecs =
8018 le32_to_cpu(resp->master_func_wait_period_after_reset);
8019 fw_health->post_reset_max_wait_dsecs =
8020 le32_to_cpu(resp->max_bailout_time_after_reset);
8021 fw_health->regs[BNXT_FW_HEALTH_REG] =
8022 le32_to_cpu(resp->fw_health_status_reg);
8023 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
8024 le32_to_cpu(resp->fw_heartbeat_reg);
8025 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
8026 le32_to_cpu(resp->fw_reset_cnt_reg);
8027 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
8028 le32_to_cpu(resp->reset_inprogress_reg);
8029 fw_health->fw_reset_inprog_reg_mask =
8030 le32_to_cpu(resp->reset_inprogress_reg_mask);
8031 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
8032 if (fw_health->fw_reset_seq_cnt >= 16) {
8033 rc = -EINVAL;
8034 goto err_recovery_out;
8035 }
8036 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
8037 fw_health->fw_reset_seq_regs[i] =
8038 le32_to_cpu(resp->reset_reg[i]);
8039 fw_health->fw_reset_seq_vals[i] =
8040 le32_to_cpu(resp->reset_reg_val[i]);
8041 fw_health->fw_reset_seq_delay_msec[i] =
8042 resp->delay_after_reset[i];
8043 }
8044err_recovery_out:
bbf33d1d 8045 hwrm_req_drop(bp, req);
9ffbd677
MC
8046 if (!rc)
8047 rc = bnxt_map_fw_health_regs(bp);
07f83d72
MC
8048 if (rc)
8049 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
8050 return rc;
8051}
8052
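/* bnxt_hwrm_error_recovery_qcfg() above retrieves the firmware
 * health-monitoring contract: polling frequency, the wait periods
 * around a reset, the locations of the health-status, heartbeat,
 * reset-counter and reset-in-progress registers, and the bounded
 * register write sequence the driver must perform to reset the
 * firmware.  GRC registers are then mapped by
 * bnxt_map_fw_health_regs(); on failure the ERROR_RECOVERY capability
 * is cleared.
 */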
c0c050c5
MC
8053static int bnxt_hwrm_func_reset(struct bnxt *bp)
8054{
bbf33d1d
EP
8055 struct hwrm_func_reset_input *req;
8056 int rc;
c0c050c5 8057
bbf33d1d
EP
8058 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
8059 if (rc)
8060 return rc;
c0c050c5 8061
bbf33d1d
EP
8062 req->enables = 0;
8063 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
8064 return hwrm_req_send(bp, req);
c0c050c5
MC
8065}
8066
4933f675
VV
8067static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
8068{
8069 struct hwrm_nvm_get_dev_info_output nvm_info;
8070
8071 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
8072 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
8073 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
8074 nvm_info.nvm_cfg_ver_upd);
8075}
8076
c0c050c5
MC
8077static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
8078{
bbf33d1d
EP
8079 struct hwrm_queue_qportcfg_output *resp;
8080 struct hwrm_queue_qportcfg_input *req;
aabfc016
MC
8081 u8 i, j, *qptr;
8082 bool no_rdma;
bbf33d1d 8083 int rc = 0;
c0c050c5 8084
bbf33d1d
EP
8085 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
8086 if (rc)
8087 return rc;
c0c050c5 8088
bbf33d1d
EP
8089 resp = hwrm_req_hold(bp, req);
8090 rc = hwrm_req_send(bp, req);
c0c050c5
MC
8091 if (rc)
8092 goto qportcfg_exit;
8093
8094 if (!resp->max_configurable_queues) {
8095 rc = -EINVAL;
8096 goto qportcfg_exit;
8097 }
8098 bp->max_tc = resp->max_configurable_queues;
87c374de 8099 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
8100 if (bp->max_tc > BNXT_MAX_QUEUE)
8101 bp->max_tc = BNXT_MAX_QUEUE;
8102
aabfc016
MC
8103 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
8104 qptr = &resp->queue_id0;
8105 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
8106 bp->q_info[j].queue_id = *qptr;
8107 bp->q_ids[i] = *qptr++;
aabfc016
MC
8108 bp->q_info[j].queue_profile = *qptr++;
8109 bp->tc_to_qidx[j] = j;
8110 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
8111 (no_rdma && BNXT_PF(bp)))
8112 j++;
8113 }
98f04cf0 8114 bp->max_q = bp->max_tc;
aabfc016
MC
8115 bp->max_tc = max_t(u8, j, 1);
8116
441cabbb
MC
8117 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
8118 bp->max_tc = 1;
8119
87c374de
MC
8120 if (bp->max_lltc > bp->max_tc)
8121 bp->max_lltc = bp->max_tc;
8122
c0c050c5 8123qportcfg_exit:
bbf33d1d 8124 hwrm_req_drop(bp, req);
c0c050c5
MC
8125 return rc;
8126}
8127
7b370ad7 8128static int bnxt_hwrm_poll(struct bnxt *bp)
c0c050c5 8129{
bbf33d1d 8130 struct hwrm_ver_get_input *req;
ba642ab7 8131 int rc;
c0c050c5 8132
bbf33d1d
EP
8133 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8134 if (rc)
8135 return rc;
ba642ab7 8136
bbf33d1d
EP
8137 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8138 req->hwrm_intf_min = HWRM_VERSION_MINOR;
8139 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8140
8141 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
8142 rc = hwrm_req_send(bp, req);
ba642ab7
MC
8143 return rc;
8144}
8145
8146static int bnxt_hwrm_ver_get(struct bnxt *bp)
8147{
bbf33d1d
EP
8148 struct hwrm_ver_get_output *resp;
8149 struct hwrm_ver_get_input *req;
d0ad2ea2 8150 u16 fw_maj, fw_min, fw_bld, fw_rsv;
b7a444f0 8151 u32 dev_caps_cfg, hwrm_ver;
d0ad2ea2 8152 int rc, len;
ba642ab7 8153
bbf33d1d
EP
8154 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8155 if (rc)
8156 return rc;
8157
8158 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
ba642ab7 8159 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bbf33d1d
EP
8160 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8161 req->hwrm_intf_min = HWRM_VERSION_MINOR;
8162 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7b370ad7 8163
bbf33d1d
EP
8164 resp = hwrm_req_hold(bp, req);
8165 rc = hwrm_req_send(bp, req);
c0c050c5
MC
8166 if (rc)
8167 goto hwrm_ver_get_exit;
8168
8169 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8170
894aa69a
MC
8171 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8172 resp->hwrm_intf_min_8b << 8 |
8173 resp->hwrm_intf_upd_8b;
8174 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 8175 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
8176 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8177 resp->hwrm_intf_upd_8b);
c193554e 8178 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 8179 }
b7a444f0
VV
8180
8181 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8182 HWRM_VERSION_UPDATE;
8183
8184 if (bp->hwrm_spec_code > hwrm_ver)
8185 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8186 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8187 HWRM_VERSION_UPDATE);
8188 else
8189 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8190 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8191 resp->hwrm_intf_upd_8b);
8192
d0ad2ea2
MC
8193 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8194 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8195 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8196 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8197 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8198 len = FW_VER_STR_LEN;
8199 } else {
8200 fw_maj = resp->hwrm_fw_maj_8b;
8201 fw_min = resp->hwrm_fw_min_8b;
8202 fw_bld = resp->hwrm_fw_bld_8b;
8203 fw_rsv = resp->hwrm_fw_rsvd_8b;
8204 len = BC_HWRM_STR_LEN;
8205 }
8206 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8207 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8208 fw_rsv);
c0c050c5 8209
691aa620
VV
8210 if (strlen(resp->active_pkg_name)) {
8211 int fw_ver_len = strlen(bp->fw_ver_str);
8212
8213 snprintf(bp->fw_ver_str + fw_ver_len,
8214 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8215 resp->active_pkg_name);
8216 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8217 }
8218
ff4fe81d
MC
8219 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8220 if (!bp->hwrm_cmd_timeout)
8221 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
bce9a0b7
EP
8222 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8223 if (!bp->hwrm_cmd_max_timeout)
8224 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8225 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8226 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8227 bp->hwrm_cmd_max_timeout / 1000);
ff4fe81d 8228
1dfddc41 8229 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 8230 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
8231 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8232 }
8233 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8234 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 8235
659c805c 8236 bp->chip_num = le16_to_cpu(resp->chip_num);
5313845f 8237 bp->chip_rev = resp->chip_rev;
3e8060fa
PS
8238 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8239 !resp->chip_metal)
8240 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 8241
e605db80
DK
8242 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8243 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8244 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 8245 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 8246
760b6d33
VD
8247 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8248 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8249
abd43a13
VD
8250 if (dev_caps_cfg &
8251 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8252 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8253
2a516444
MC
8254 if (dev_caps_cfg &
8255 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8256 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8257
e969ae5b
MC
8258 if (dev_caps_cfg &
8259 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8260 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8261
c0c050c5 8262hwrm_ver_get_exit:
bbf33d1d 8263 hwrm_req_drop(bp, req);
c0c050c5
MC
8264 return rc;
8265}
8266
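/* bnxt_hwrm_ver_get() above negotiates the HWRM interface: it caches
 * the firmware version strings, the default and maximum command
 * timeouts, the maximum (extended) request lengths, the chip number
 * and revision, and capability bits such as short-command, Kong
 * channel, 64-bit flow handles, trusted VF and advanced flow
 * management support.
 */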
5ac67d8b
RS
8267int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8268{
bbf33d1d 8269 struct hwrm_fw_set_time_input *req;
7dfaa7bc
AB
8270 struct tm tm;
8271 time64_t now = ktime_get_real_seconds();
bbf33d1d 8272 int rc;
5ac67d8b 8273
ca2c39e2
MC
8274 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8275 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
8276 return -EOPNOTSUPP;
8277
7dfaa7bc 8278 time64_to_tm(now, 0, &tm);
bbf33d1d
EP
8279 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8280 if (rc)
8281 return rc;
8282
8283 req->year = cpu_to_le16(1900 + tm.tm_year);
8284 req->month = 1 + tm.tm_mon;
8285 req->day = tm.tm_mday;
8286 req->hour = tm.tm_hour;
8287 req->minute = tm.tm_min;
8288 req->second = tm.tm_sec;
8289 return hwrm_req_send(bp, req);
5ac67d8b
RS
8290}
8291
fea6b333
MC
8292static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8293{
8294 u64 sw_tmp;
8295
fa97f303 8296 hw &= mask;
fea6b333
MC
8297 sw_tmp = (*sw & ~mask) | hw;
8298 if (hw < (*sw & mask))
8299 sw_tmp += mask + 1;
8300 WRITE_ONCE(*sw, sw_tmp);
8301}
8302
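/* bnxt_add_one_ctr() above folds a narrower hardware counter into a
 * full 64-bit software counter while handling wrap-around.  A sketch
 * with a hypothetical 16-bit mask (0xffff): if *sw is 0x1fffe (low
 * bits 0xfffe) and the hardware now reports 0x0003, then
 * hw < (*sw & mask) signals a wrap, so mask + 1 (0x10000) is added
 * and the counter advances to 0x20003 instead of going backwards.
 */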
8303static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8304 int count, bool ignore_zero)
8305{
8306 int i;
8307
8308 for (i = 0; i < count; i++) {
8309 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8310
8311 if (ignore_zero && !hw)
8312 continue;
8313
8314 if (masks[i] == -1ULL)
8315 sw_stats[i] = hw;
8316 else
8317 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8318 }
8319}
8320
8321static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8322{
8323 if (!stats->hw_stats)
8324 return;
8325
8326 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8327 stats->hw_masks, stats->len / 8, false);
8328}
8329
8330static void bnxt_accumulate_all_stats(struct bnxt *bp)
8331{
8332 struct bnxt_stats_mem *ring0_stats;
8333 bool ignore_zero = false;
8334 int i;
8335
8336 /* Chip bug. Counter intermittently becomes 0. */
8337 if (bp->flags & BNXT_FLAG_CHIP_P5)
8338 ignore_zero = true;
8339
8340 for (i = 0; i < bp->cp_nr_rings; i++) {
8341 struct bnxt_napi *bnapi = bp->bnapi[i];
8342 struct bnxt_cp_ring_info *cpr;
8343 struct bnxt_stats_mem *stats;
8344
8345 cpr = &bnapi->cp_ring;
8346 stats = &cpr->stats;
8347 if (!i)
8348 ring0_stats = stats;
8349 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8350 ring0_stats->hw_masks,
8351 ring0_stats->len / 8, ignore_zero);
8352 }
8353 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8354 struct bnxt_stats_mem *stats = &bp->port_stats;
8355 __le64 *hw_stats = stats->hw_stats;
8356 u64 *sw_stats = stats->sw_stats;
8357 u64 *masks = stats->hw_masks;
8358 int cnt;
8359
8360 cnt = sizeof(struct rx_port_stats) / 8;
8361 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8362
8363 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8364 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8365 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8366 cnt = sizeof(struct tx_port_stats) / 8;
8367 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8368 }
8369 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8370 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8371 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8372 }
8373}
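/* The per-ring hardware counters share one layout, so the masks and
 * length recorded for ring 0 are reused for every completion ring in the
 * loop above.
 */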
8374
531d1d26 8375static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
3bdf56c4 8376{
bbf33d1d 8377 struct hwrm_port_qstats_input *req;
3bdf56c4 8378 struct bnxt_pf_info *pf = &bp->pf;
bbf33d1d 8379 int rc;
3bdf56c4
MC
8380
8381 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8382 return 0;
8383
531d1d26
MC
8384 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8385 return -EOPNOTSUPP;
8386
bbf33d1d
EP
8387 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8388 if (rc)
8389 return rc;
8390
8391 req->flags = flags;
8392 req->port_id = cpu_to_le16(pf->port_id);
8393 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
177a6cde 8394 BNXT_TX_PORT_STATS_BYTE_OFFSET);
bbf33d1d
EP
8395 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8396 return hwrm_req_send(bp, req);
3bdf56c4
MC
8397}
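/* HWRM request lifecycle used throughout this file (a rough sketch of the
 * helpers implemented in bnxt_hwrm.c):
 *
 *	rc = hwrm_req_init(bp, req, HWRM_...);	 allocate and init the request
 *	resp = hwrm_req_hold(bp, req);		 keep the response buffer valid
 *	rc = hwrm_req_send(bp, req);		 send and wait for completion
 *	... read resp ...
 *	hwrm_req_drop(bp, req);			 release request and response
 *
 * Callers that do not read the response, like bnxt_hwrm_port_qstats()
 * above, skip the hold/drop pair and simply return hwrm_req_send().
 */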
8398
531d1d26 8399static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
00db3cba 8400{
bbf33d1d
EP
8401 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8402 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8403 struct hwrm_port_qstats_ext_output *resp_qs;
8404 struct hwrm_port_qstats_ext_input *req_qs;
00db3cba 8405 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 8406 u32 tx_stat_size;
36e53349 8407 int rc;
00db3cba
VV
8408
8409 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8410 return 0;
8411
531d1d26
MC
8412 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8413 return -EOPNOTSUPP;
8414
bbf33d1d
EP
8415 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8416 if (rc)
8417 return rc;
8418
8419 req_qs->flags = flags;
8420 req_qs->port_id = cpu_to_le16(pf->port_id);
8421 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8422 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
177a6cde
MC
8423 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8424 sizeof(struct tx_port_stats_ext) : 0;
bbf33d1d
EP
8425 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8426 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8427 resp_qs = hwrm_req_hold(bp, req_qs);
8428 rc = hwrm_req_send(bp, req_qs);
36e53349 8429 if (!rc) {
bbf33d1d
EP
8430 bp->fw_rx_stats_ext_size =
8431 le16_to_cpu(resp_qs->rx_stat_size) / 8;
21e70778
MC
8432 if (BNXT_FW_MAJ(bp) < 220 &&
8433 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8434 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8435
ad361adf 8436 bp->fw_tx_stats_ext_size = tx_stat_size ?
bbf33d1d 8437 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
36e53349
MC
8438 } else {
8439 bp->fw_rx_stats_ext_size = 0;
8440 bp->fw_tx_stats_ext_size = 0;
8441 }
bbf33d1d
EP
8442 hwrm_req_drop(bp, req_qs);
8443
531d1d26 8444 if (flags)
bbf33d1d 8445 return rc;
531d1d26 8446
e37fed79
MC
8447 if (bp->fw_tx_stats_ext_size <=
8448 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
e37fed79
MC
8449 bp->pri2cos_valid = 0;
8450 return rc;
8451 }
8452
bbf33d1d
EP
8453 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8454 if (rc)
8455 return rc;
e37fed79 8456
bbf33d1d
EP
8457 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8458
8459 resp_qc = hwrm_req_hold(bp, req_qc);
8460 rc = hwrm_req_send(bp, req_qc);
e37fed79 8461 if (!rc) {
e37fed79
MC
8462 u8 *pri2cos;
8463 int i, j;
8464
bbf33d1d 8465 pri2cos = &resp_qc->pri0_cos_queue_id;
e37fed79
MC
8466 for (i = 0; i < 8; i++) {
8467 u8 queue_id = pri2cos[i];
a24ec322 8468 u8 queue_idx;
e37fed79 8469
a24ec322
MC
8470 /* Per port queue IDs start from 0, 10, 20, etc */
8471 queue_idx = queue_id % 10;
8472 if (queue_idx > BNXT_MAX_QUEUE) {
8473 bp->pri2cos_valid = false;
bbf33d1d
EP
8474 hwrm_req_drop(bp, req_qc);
8475 return rc;
a24ec322 8476 }
e37fed79
MC
8477 for (j = 0; j < bp->max_q; j++) {
8478 if (bp->q_ids[j] == queue_id)
a24ec322 8479 bp->pri2cos_idx[i] = queue_idx;
e37fed79
MC
8480 }
8481 }
bbf33d1d 8482 bp->pri2cos_valid = true;
e37fed79 8483 }
bbf33d1d
EP
8484 hwrm_req_drop(bp, req_qc);
8485
36e53349 8486 return rc;
00db3cba
VV
8487}
8488
c0c050c5
MC
8489static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8490{
7ae9dc35
MC
8491 bnxt_hwrm_tunnel_dst_port_free(bp,
8492 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8493 bnxt_hwrm_tunnel_dst_port_free(bp,
8494 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
c0c050c5
MC
8495}
8496
8497static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8498{
8499 int rc, i;
8500 u32 tpa_flags = 0;
8501
8502 if (set_tpa)
8503 tpa_flags = bp->flags & BNXT_FLAG_TPA;
b340dc68 8504 else if (BNXT_NO_FW_ACCESS(bp))
b4fff207 8505 return 0;
c0c050c5
MC
8506 for (i = 0; i < bp->nr_vnics; i++) {
8507 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8508 if (rc) {
8509 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 8510 i, rc);
c0c050c5
MC
8511 return rc;
8512 }
8513 }
8514 return 0;
8515}
8516
8517static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8518{
8519 int i;
8520
8521 for (i = 0; i < bp->nr_vnics; i++)
8522 bnxt_hwrm_vnic_set_rss(bp, i, false);
8523}
8524
a46ecb11 8525static void bnxt_clear_vnic(struct bnxt *bp)
c0c050c5 8526{
a46ecb11
MC
8527 if (!bp->vnic_info)
8528 return;
8529
8530 bnxt_hwrm_clear_vnic_filter(bp);
8531 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
c0c050c5
MC
8532		/* clear all RSS settings before freeing the vnic ctx */
8533 bnxt_hwrm_clear_vnic_rss(bp);
8534 bnxt_hwrm_vnic_ctx_free(bp);
c0c050c5 8535 }
a46ecb11
MC
8536	/* before freeing the vnic, undo the vnic tpa settings */
8537 if (bp->flags & BNXT_FLAG_TPA)
8538 bnxt_set_tpa(bp, false);
8539 bnxt_hwrm_vnic_free(bp);
8540 if (bp->flags & BNXT_FLAG_CHIP_P5)
8541 bnxt_hwrm_vnic_ctx_free(bp);
8542}
8543
8544static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8545 bool irq_re_init)
8546{
8547 bnxt_clear_vnic(bp);
c0c050c5
MC
8548 bnxt_hwrm_ring_free(bp, close_path);
8549 bnxt_hwrm_ring_grp_free(bp);
8550 if (irq_re_init) {
8551 bnxt_hwrm_stat_ctx_free(bp);
8552 bnxt_hwrm_free_tunnel_ports(bp);
8553 }
8554}
8555
39d8ba2e
MC
8556static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8557{
bbf33d1d
EP
8558 struct hwrm_func_cfg_input *req;
8559 u8 evb_mode;
8560 int rc;
39d8ba2e 8561
39d8ba2e 8562 if (br_mode == BRIDGE_MODE_VEB)
bbf33d1d 8563 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
39d8ba2e 8564 else if (br_mode == BRIDGE_MODE_VEPA)
bbf33d1d 8565 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
39d8ba2e
MC
8566 else
8567 return -EINVAL;
bbf33d1d
EP
8568
8569 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8570 if (rc)
8571 return rc;
8572
8573 req->fid = cpu_to_le16(0xffff);
8574 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8575 req->evb_mode = evb_mode;
8576 return hwrm_req_send(bp, req);
39d8ba2e
MC
8577}
8578
c3480a60
MC
8579static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8580{
bbf33d1d
EP
8581 struct hwrm_func_cfg_input *req;
8582 int rc;
c3480a60
MC
8583
8584 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8585 return 0;
8586
bbf33d1d
EP
8587 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8588 if (rc)
8589 return rc;
8590
8591 req->fid = cpu_to_le16(0xffff);
8592 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8593 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 8594 if (size == 128)
bbf33d1d 8595 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60 8596
bbf33d1d 8597 return hwrm_req_send(bp, req);
c3480a60
MC
8598}
8599
7b3af4f7 8600static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 8601{
ae10ae74 8602 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
8603 int rc;
8604
ae10ae74
MC
8605 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8606 goto skip_rss_ctx;
8607
c0c050c5 8608 /* allocate context for vnic */
94ce9caa 8609 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
8610 if (rc) {
8611 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8612 vnic_id, rc);
8613 goto vnic_setup_err;
8614 }
8615 bp->rsscos_nr_ctxs++;
8616
94ce9caa
PS
8617 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8618 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8619 if (rc) {
8620 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8621 vnic_id, rc);
8622 goto vnic_setup_err;
8623 }
8624 bp->rsscos_nr_ctxs++;
8625 }
8626
ae10ae74 8627skip_rss_ctx:
c0c050c5
MC
8628 /* configure default vnic, ring grp */
8629 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8630 if (rc) {
8631 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8632 vnic_id, rc);
8633 goto vnic_setup_err;
8634 }
8635
8636 /* Enable RSS hashing on vnic */
8637 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8638 if (rc) {
8639 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8640 vnic_id, rc);
8641 goto vnic_setup_err;
8642 }
8643
8644 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8645 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8646 if (rc) {
8647 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8648 vnic_id, rc);
8649 }
8650 }
8651
8652vnic_setup_err:
8653 return rc;
8654}
8655
7b3af4f7
MC
8656static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8657{
8658 int rc, i, nr_ctxs;
8659
f9f6a3fb 8660 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7
MC
8661 for (i = 0; i < nr_ctxs; i++) {
8662 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8663 if (rc) {
8664 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8665 vnic_id, i, rc);
8666 break;
8667 }
8668 bp->rsscos_nr_ctxs++;
8669 }
8670 if (i < nr_ctxs)
8671 return -ENOMEM;
8672
8673 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8674 if (rc) {
8675 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8676 vnic_id, rc);
8677 return rc;
8678 }
8679 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8680 if (rc) {
8681 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8682 vnic_id, rc);
8683 return rc;
8684 }
8685 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8686 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8687 if (rc) {
8688 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8689 vnic_id, rc);
8690 }
8691 }
8692 return rc;
8693}
8694
8695static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8696{
8697 if (bp->flags & BNXT_FLAG_CHIP_P5)
8698 return __bnxt_setup_vnic_p5(bp, vnic_id);
8699 else
8700 return __bnxt_setup_vnic(bp, vnic_id);
8701}
8702
c0c050c5
MC
8703static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8704{
8705#ifdef CONFIG_RFS_ACCEL
8706 int i, rc = 0;
8707
9b3d15e6
MC
8708 if (bp->flags & BNXT_FLAG_CHIP_P5)
8709 return 0;
8710
c0c050c5 8711 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 8712 struct bnxt_vnic_info *vnic;
c0c050c5
MC
8713 u16 vnic_id = i + 1;
8714 u16 ring_id = i;
8715
8716 if (vnic_id >= bp->nr_vnics)
8717 break;
8718
ae10ae74
MC
8719 vnic = &bp->vnic_info[vnic_id];
8720 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8721 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8722 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 8723 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
8724 if (rc) {
8725 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8726 vnic_id, rc);
8727 break;
8728 }
8729 rc = bnxt_setup_vnic(bp, vnic_id);
8730 if (rc)
8731 break;
8732 }
8733 return rc;
8734#else
8735 return 0;
8736#endif
8737}
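/* Note: vnic 0 is the default vnic set up in bnxt_init_chip(); the RFS
 * vnics created above start at ID 1, one per RX ring, and the loop stops
 * once vnic_id reaches nr_vnics.
 */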
8738
dd85fc0a 8739/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
17c71ac3
MC
8740static bool bnxt_promisc_ok(struct bnxt *bp)
8741{
8742#ifdef CONFIG_BNXT_SRIOV
dd85fc0a 8743 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
17c71ac3
MC
8744 return false;
8745#endif
8746 return true;
8747}
8748
dc52c6c7
PS
8749static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8750{
8751 unsigned int rc = 0;
8752
8753 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8754 if (rc) {
8755 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8756 rc);
8757 return rc;
8758 }
8759
8760 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8761 if (rc) {
8762 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8763 rc);
8764 return rc;
8765 }
8766 return rc;
8767}
8768
b664f008 8769static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 8770static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 8771
c0c050c5
MC
8772static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8773{
7d2837dd 8774 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 8775 int rc = 0;
76595193 8776 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
8777
8778 if (irq_re_init) {
8779 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8780 if (rc) {
8781 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8782 rc);
8783 goto err_out;
8784 }
8785 }
8786
8787 rc = bnxt_hwrm_ring_alloc(bp);
8788 if (rc) {
8789 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8790 goto err_out;
8791 }
8792
8793 rc = bnxt_hwrm_ring_grp_alloc(bp);
8794 if (rc) {
8795 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8796 goto err_out;
8797 }
8798
76595193
PS
8799 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8800 rx_nr_rings--;
8801
c0c050c5 8802 /* default vnic 0 */
76595193 8803 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
8804 if (rc) {
8805 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8806 goto err_out;
8807 }
8808
1a9e4f50
SK
8809 if (BNXT_VF(bp))
8810 bnxt_hwrm_func_qcfg(bp);
8811
c0c050c5
MC
8812 rc = bnxt_setup_vnic(bp, 0);
8813 if (rc)
8814 goto err_out;
98a4322b
EP
8815 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
8816 bnxt_hwrm_update_rss_hash_cfg(bp);
c0c050c5
MC
8817
8818 if (bp->flags & BNXT_FLAG_RFS) {
8819 rc = bnxt_alloc_rfs_vnics(bp);
8820 if (rc)
8821 goto err_out;
8822 }
8823
8824 if (bp->flags & BNXT_FLAG_TPA) {
8825 rc = bnxt_set_tpa(bp, true);
8826 if (rc)
8827 goto err_out;
8828 }
8829
8830 if (BNXT_VF(bp))
8831 bnxt_update_vf_mac(bp);
8832
8833 /* Filter for default vnic 0 */
8834 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8835 if (rc) {
662c9b22
EP
8836 if (BNXT_VF(bp) && rc == -ENODEV)
8837 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8838 else
8839 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
c0c050c5
MC
8840 goto err_out;
8841 }
7d2837dd 8842 vnic->uc_filter_count = 1;
c0c050c5 8843
30e33848 8844 vnic->rx_mask = 0;
cfcab3b3
MC
8845 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8846 goto skip_rx_mask;
8847
30e33848
MC
8848 if (bp->dev->flags & IFF_BROADCAST)
8849 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 8850
dd85fc0a 8851 if (bp->dev->flags & IFF_PROMISC)
7d2837dd
MC
8852 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8853
8854 if (bp->dev->flags & IFF_ALLMULTI) {
8855 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8856 vnic->mc_list_count = 0;
8cdb1592 8857 } else if (bp->dev->flags & IFF_MULTICAST) {
7d2837dd
MC
8858 u32 mask = 0;
8859
8860 bnxt_mc_list_updated(bp, &mask);
8861 vnic->rx_mask |= mask;
8862 }
c0c050c5 8863
b664f008
MC
8864 rc = bnxt_cfg_rx_mode(bp);
8865 if (rc)
c0c050c5 8866 goto err_out;
c0c050c5 8867
cfcab3b3 8868skip_rx_mask:
c0c050c5
MC
8869 rc = bnxt_hwrm_set_coal(bp);
8870 if (rc)
8871 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
8872 rc);
8873
8874 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8875 rc = bnxt_setup_nitroa0_vnic(bp);
8876 if (rc)
8877 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8878 rc);
8879 }
c0c050c5 8880
cf6645f8
MC
8881 if (BNXT_VF(bp)) {
8882 bnxt_hwrm_func_qcfg(bp);
8883 netdev_update_features(bp->dev);
8884 }
8885
c0c050c5
MC
8886 return 0;
8887
8888err_out:
8889 bnxt_hwrm_resource_free(bp, 0, true);
8890
8891 return rc;
8892}
8893
8894static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8895{
8896 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8897 return 0;
8898}
8899
8900static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8901{
2247925f 8902 bnxt_init_cp_rings(bp);
c0c050c5
MC
8903 bnxt_init_rx_rings(bp);
8904 bnxt_init_tx_rings(bp);
8905 bnxt_init_ring_grps(bp, irq_re_init);
8906 bnxt_init_vnics(bp);
8907
8908 return bnxt_init_chip(bp, irq_re_init);
8909}
8910
c0c050c5
MC
8911static int bnxt_set_real_num_queues(struct bnxt *bp)
8912{
8913 int rc;
8914 struct net_device *dev = bp->dev;
8915
5f449249
MC
8916 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8917 bp->tx_nr_rings_xdp);
c0c050c5
MC
8918 if (rc)
8919 return rc;
8920
8921 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8922 if (rc)
8923 return rc;
8924
8925#ifdef CONFIG_RFS_ACCEL
45019a18 8926 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 8927 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
8928#endif
8929
8930 return rc;
8931}
8932
6e6c5a57
MC
8933static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8934 bool shared)
8935{
8936 int _rx = *rx, _tx = *tx;
8937
8938 if (shared) {
8939 *rx = min_t(int, _rx, max);
8940 *tx = min_t(int, _tx, max);
8941 } else {
8942 if (max < 2)
8943 return -ENOMEM;
8944
8945 while (_rx + _tx > max) {
8946 if (_rx > _tx && _rx > 1)
8947 _rx--;
8948 else if (_tx > 1)
8949 _tx--;
8950 }
8951 *rx = _rx;
8952 *tx = _tx;
8953 }
8954 return 0;
8955}
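/* Example of the trimming above with max = 6 vectors and separate rings:
 * rx = 5, tx = 4 is reduced step by step, (5,4) -> (4,4) -> (4,3) -> (3,3),
 * until rx + tx fits within max.  In shared mode each count is simply
 * capped at max.
 */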
8956
7809592d
MC
8957static void bnxt_setup_msix(struct bnxt *bp)
8958{
8959 const int len = sizeof(bp->irq_tbl[0].name);
8960 struct net_device *dev = bp->dev;
8961 int tcs, i;
8962
8963 tcs = netdev_get_num_tc(dev);
18e4960c 8964 if (tcs) {
d1e7925e 8965 int i, off, count;
7809592d 8966
d1e7925e
MC
8967 for (i = 0; i < tcs; i++) {
8968 count = bp->tx_nr_rings_per_tc;
8969 off = i * count;
8970 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
8971 }
8972 }
8973
8974 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 8975 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
8976 char *attr;
8977
8978 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8979 attr = "TxRx";
8980 else if (i < bp->rx_nr_rings)
8981 attr = "rx";
8982 else
8983 attr = "tx";
8984
e5811b8c
MC
8985 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8986 attr, i);
8987 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
8988 }
8989}
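/* With netdev TCs configured, TX queues are laid out contiguously per TC:
 * e.g. tcs = 2 and tx_nr_rings_per_tc = 4 maps TC 0 to queues 0-3 and
 * TC 1 to queues 4-7 via netdev_set_tc_queue(dev, i, count, i * count).
 */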
8990
8991static void bnxt_setup_inta(struct bnxt *bp)
8992{
8993 const int len = sizeof(bp->irq_tbl[0].name);
8994
8995 if (netdev_get_num_tc(bp->dev))
8996 netdev_reset_tc(bp->dev);
8997
8998 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8999 0);
9000 bp->irq_tbl[0].handler = bnxt_inta;
9001}
9002
20d7d1c5
EP
9003static int bnxt_init_int_mode(struct bnxt *bp);
9004
7809592d
MC
9005static int bnxt_setup_int_mode(struct bnxt *bp)
9006{
9007 int rc;
9008
20d7d1c5
EP
9009 if (!bp->irq_tbl) {
9010 rc = bnxt_init_int_mode(bp);
9011 if (rc || !bp->irq_tbl)
9012 return rc ?: -ENODEV;
9013 }
9014
7809592d
MC
9015 if (bp->flags & BNXT_FLAG_USING_MSIX)
9016 bnxt_setup_msix(bp);
9017 else
9018 bnxt_setup_inta(bp);
9019
9020 rc = bnxt_set_real_num_queues(bp);
9021 return rc;
9022}
9023
b7429954 9024#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
9025static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
9026{
6a4f2947 9027 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
9028}
9029
9030static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
9031{
6a4f2947 9032 return bp->hw_resc.max_vnics;
8079e8f1 9033}
b7429954 9034#endif
8079e8f1 9035
e4060d30
MC
9036unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
9037{
6a4f2947 9038 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
9039}
9040
9041unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
9042{
6a4f2947 9043 return bp->hw_resc.max_cp_rings;
e4060d30
MC
9044}
9045
e916b081 9046static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 9047{
c0b8cda0
MC
9048 unsigned int cp = bp->hw_resc.max_cp_rings;
9049
9050 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9051 cp -= bnxt_get_ulp_msix_num(bp);
9052
9053 return cp;
a588e458
MC
9054}
9055
ad95c27b 9056static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 9057{
6a4f2947
MC
9058 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9059
f7588cd8
MC
9060 if (bp->flags & BNXT_FLAG_CHIP_P5)
9061 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
9062
6a4f2947 9063 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
9064}
9065
30f52947 9066static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 9067{
6a4f2947 9068 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
9069}
9070
e916b081
MC
9071unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
9072{
9073 unsigned int cp;
9074
9075 cp = bnxt_get_max_func_cp_rings_for_en(bp);
9076 if (bp->flags & BNXT_FLAG_CHIP_P5)
9077 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
9078 else
9079 return cp - bp->cp_nr_rings;
9080}
9081
c027c6b4
VV
9082unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
9083{
d77b1ad8 9084 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
9085}
9086
fbcfc8e4
MC
9087int bnxt_get_avail_msix(struct bnxt *bp, int num)
9088{
9089 int max_cp = bnxt_get_max_func_cp_rings(bp);
9090 int max_irq = bnxt_get_max_func_irqs(bp);
9091 int total_req = bp->cp_nr_rings + num;
9092 int max_idx, avail_msix;
9093
75720e63
MC
9094 max_idx = bp->total_irqs;
9095 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9096 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 9097 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 9098 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
9099 return avail_msix;
9100
9101 if (max_irq < total_req) {
9102 num = max_irq - bp->cp_nr_rings;
9103 if (num <= 0)
9104 return 0;
9105 }
9106 return num;
9107}
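/* Roughly: report the MSI-X vectors left over after the L2 rings claim
 * bp->cp_nr_rings (pre-P5 chips are also capped by the max completion
 * rings).  With the new resource manager, additional vectors may still be
 * granted, but the request is clamped so it never exceeds the function's
 * max_irqs.
 */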
9108
08654eb2
MC
9109static int bnxt_get_num_msix(struct bnxt *bp)
9110{
f1ca94de 9111 if (!BNXT_NEW_RM(bp))
08654eb2
MC
9112 return bnxt_get_max_func_irqs(bp);
9113
c0b8cda0 9114 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
9115}
9116
7809592d 9117static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 9118{
fbcfc8e4 9119 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 9120 struct msix_entry *msix_ent;
c0c050c5 9121
08654eb2
MC
9122 total_vecs = bnxt_get_num_msix(bp);
9123 max = bnxt_get_max_func_irqs(bp);
9124 if (total_vecs > max)
9125 total_vecs = max;
9126
2773dfb2
MC
9127 if (!total_vecs)
9128 return 0;
9129
c0c050c5
MC
9130 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
9131 if (!msix_ent)
9132 return -ENOMEM;
9133
9134 for (i = 0; i < total_vecs; i++) {
9135 msix_ent[i].entry = i;
9136 msix_ent[i].vector = 0;
9137 }
9138
01657bcd
MC
9139 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
9140 min = 2;
9141
9142 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
9143 ulp_msix = bnxt_get_ulp_msix_num(bp);
9144 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
9145 rc = -ENODEV;
9146 goto msix_setup_exit;
9147 }
9148
9149 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9150 if (bp->irq_tbl) {
7809592d
MC
9151 for (i = 0; i < total_vecs; i++)
9152 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 9153
7809592d 9154 bp->total_irqs = total_vecs;
c0c050c5 9155 /* Trim rings based upon num of vectors allocated */
6e6c5a57 9156 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 9157 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
9158 if (rc)
9159 goto msix_setup_exit;
9160
7809592d
MC
9161 bp->cp_nr_rings = (min == 1) ?
9162 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9163 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 9164
c0c050c5
MC
9165 } else {
9166 rc = -ENOMEM;
9167 goto msix_setup_exit;
9168 }
9169 bp->flags |= BNXT_FLAG_USING_MSIX;
9170 kfree(msix_ent);
9171 return 0;
9172
9173msix_setup_exit:
7809592d
MC
9174 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9175 kfree(bp->irq_tbl);
9176 bp->irq_tbl = NULL;
c0c050c5
MC
9177 pci_disable_msix(bp->pdev);
9178 kfree(msix_ent);
9179 return rc;
9180}
9181
7809592d 9182static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 9183{
33dbcf60 9184 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
9185 if (!bp->irq_tbl)
9186 return -ENOMEM;
9187
9188 bp->total_irqs = 1;
c0c050c5
MC
9189 bp->rx_nr_rings = 1;
9190 bp->tx_nr_rings = 1;
9191 bp->cp_nr_rings = 1;
01657bcd 9192 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 9193 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 9194 return 0;
c0c050c5
MC
9195}
9196
7809592d 9197static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5 9198{
20d7d1c5 9199 int rc = -ENODEV;
c0c050c5
MC
9200
9201 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 9202 rc = bnxt_init_msix(bp);
c0c050c5 9203
1fa72e29 9204 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 9205 /* fallback to INTA */
7809592d 9206 rc = bnxt_init_inta(bp);
c0c050c5
MC
9207 }
9208 return rc;
9209}
9210
7809592d
MC
9211static void bnxt_clear_int_mode(struct bnxt *bp)
9212{
9213 if (bp->flags & BNXT_FLAG_USING_MSIX)
9214 pci_disable_msix(bp->pdev);
9215
9216 kfree(bp->irq_tbl);
9217 bp->irq_tbl = NULL;
9218 bp->flags &= ~BNXT_FLAG_USING_MSIX;
9219}
9220
1b3f0b75 9221int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 9222{
674f50a5 9223 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 9224 bool irq_cleared = false;
674f50a5
MC
9225 int rc;
9226
9227 if (!bnxt_need_reserve_rings(bp))
9228 return 0;
9229
1b3f0b75
MC
9230 if (irq_re_init && BNXT_NEW_RM(bp) &&
9231 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 9232 bnxt_ulp_irq_stop(bp);
674f50a5 9233 bnxt_clear_int_mode(bp);
1b3f0b75 9234 irq_cleared = true;
36d65be9
MC
9235 }
9236 rc = __bnxt_reserve_rings(bp);
1b3f0b75 9237 if (irq_cleared) {
36d65be9
MC
9238 if (!rc)
9239 rc = bnxt_init_int_mode(bp);
ec86f14e 9240 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
9241 }
9242 if (rc) {
9243 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9244 return rc;
674f50a5 9245 }
2038cc59
MC
9246 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
9247 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
674f50a5
MC
9248 netdev_err(bp->dev, "tx ring reservation failure\n");
9249 netdev_reset_tc(bp->dev);
2038cc59
MC
9250 if (bp->tx_nr_rings_xdp)
9251 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
9252 else
9253 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
674f50a5
MC
9254 return -ENOMEM;
9255 }
674f50a5
MC
9256 return 0;
9257}
9258
c0c050c5
MC
9259static void bnxt_free_irq(struct bnxt *bp)
9260{
9261 struct bnxt_irq *irq;
9262 int i;
9263
9264#ifdef CONFIG_RFS_ACCEL
9265 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9266 bp->dev->rx_cpu_rmap = NULL;
9267#endif
cb98526b 9268 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
9269 return;
9270
9271 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9272 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9273
9274 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
9275 if (irq->requested) {
9276 if (irq->have_cpumask) {
9277 irq_set_affinity_hint(irq->vector, NULL);
9278 free_cpumask_var(irq->cpu_mask);
9279 irq->have_cpumask = 0;
9280 }
c0c050c5 9281 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
9282 }
9283
c0c050c5
MC
9284 irq->requested = 0;
9285 }
c0c050c5
MC
9286}
9287
9288static int bnxt_request_irq(struct bnxt *bp)
9289{
b81a90d3 9290 int i, j, rc = 0;
c0c050c5
MC
9291 unsigned long flags = 0;
9292#ifdef CONFIG_RFS_ACCEL
e5811b8c 9293 struct cpu_rmap *rmap;
c0c050c5
MC
9294#endif
9295
e5811b8c
MC
9296 rc = bnxt_setup_int_mode(bp);
9297 if (rc) {
9298 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9299 rc);
9300 return rc;
9301 }
9302#ifdef CONFIG_RFS_ACCEL
9303 rmap = bp->dev->rx_cpu_rmap;
9304#endif
c0c050c5
MC
9305 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9306 flags = IRQF_SHARED;
9307
b81a90d3 9308 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9309 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9310 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9311
c0c050c5 9312#ifdef CONFIG_RFS_ACCEL
b81a90d3 9313 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
9314 rc = irq_cpu_rmap_add(rmap, irq->vector);
9315 if (rc)
9316 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
9317 j);
9318 j++;
c0c050c5
MC
9319 }
9320#endif
9321 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9322 bp->bnapi[i]);
9323 if (rc)
9324 break;
9325
9326 irq->requested = 1;
56f0fd80
VV
9327
9328 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9329 int numa_node = dev_to_node(&bp->pdev->dev);
9330
9331 irq->have_cpumask = 1;
9332 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9333 irq->cpu_mask);
9334 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9335 if (rc) {
9336 netdev_warn(bp->dev,
9337 "Set affinity failed, IRQ = %d\n",
9338 irq->vector);
9339 break;
9340 }
9341 }
c0c050c5
MC
9342 }
9343 return rc;
9344}
9345
9346static void bnxt_del_napi(struct bnxt *bp)
9347{
9348 int i;
9349
9350 if (!bp->bnapi)
9351 return;
9352
9353 for (i = 0; i < bp->cp_nr_rings; i++) {
9354 struct bnxt_napi *bnapi = bp->bnapi[i];
9355
5198d545 9356 __netif_napi_del(&bnapi->napi);
c0c050c5 9357 }
5198d545 9358	/* We called __netif_napi_del(), so we need
e5f6f564
ED
9359 * to respect an RCU grace period before freeing napi structures.
9360 */
9361 synchronize_net();
c0c050c5
MC
9362}
9363
9364static void bnxt_init_napi(struct bnxt *bp)
9365{
9366 int i;
10bbdaf5 9367 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
9368 struct bnxt_napi *bnapi;
9369
9370 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
9371 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9372
9373 if (bp->flags & BNXT_FLAG_CHIP_P5)
9374 poll_fn = bnxt_poll_p5;
9375 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
9376 cp_nr_rings--;
9377 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 9378 bnapi = bp->bnapi[i];
b48b89f9 9379 netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
c0c050c5 9380 }
10bbdaf5
PS
9381 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9382 bnapi = bp->bnapi[cp_nr_rings];
9383 netif_napi_add(bp->dev, &bnapi->napi,
b48b89f9 9384 bnxt_poll_nitroa0);
10bbdaf5 9385 }
c0c050c5
MC
9386 } else {
9387 bnapi = bp->bnapi[0];
b48b89f9 9388 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
c0c050c5
MC
9389 }
9390}
9391
9392static void bnxt_disable_napi(struct bnxt *bp)
9393{
9394 int i;
9395
e340a5c4
MC
9396 if (!bp->bnapi ||
9397 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
c0c050c5
MC
9398 return;
9399
0bc0b97f 9400 for (i = 0; i < bp->cp_nr_rings; i++) {
d38c19b1
MC
9401 struct bnxt_napi *bnapi = bp->bnapi[i];
9402 struct bnxt_cp_ring_info *cpr;
0bc0b97f 9403
d38c19b1 9404 cpr = &bnapi->cp_ring;
8becd196
MC
9405 if (bnapi->tx_fault)
9406 cpr->sw_stats.tx.tx_resets++;
d38c19b1
MC
9407 if (bnapi->in_reset)
9408 cpr->sw_stats.rx.rx_resets++;
9409 napi_disable(&bnapi->napi);
9410 if (bnapi->rx_ring)
0bc0b97f 9411 cancel_work_sync(&cpr->dim.work);
0bc0b97f 9412 }
c0c050c5
MC
9413}
9414
9415static void bnxt_enable_napi(struct bnxt *bp)
9416{
9417 int i;
9418
e340a5c4 9419 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
c0c050c5 9420 for (i = 0; i < bp->cp_nr_rings; i++) {
8a27d4b9
MC
9421 struct bnxt_napi *bnapi = bp->bnapi[i];
9422 struct bnxt_cp_ring_info *cpr;
9423
2b56b3d9
JK
9424 bnapi->tx_fault = 0;
9425
8a27d4b9 9426 cpr = &bnapi->cp_ring;
8a27d4b9 9427 bnapi->in_reset = false;
6a8788f2 9428
37b61cda
JK
9429 bnapi->tx_pkts = 0;
9430
8a27d4b9 9431 if (bnapi->rx_ring) {
6a8788f2 9432 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 9433 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 9434 }
8a27d4b9 9435 napi_enable(&bnapi->napi);
c0c050c5
MC
9436 }
9437}
9438
7df4ae9f 9439void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
9440{
9441 int i;
c0c050c5 9442 struct bnxt_tx_ring_info *txr;
c0c050c5 9443
b6ab4b01 9444 if (bp->tx_ring) {
c0c050c5 9445 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9446 txr = &bp->tx_ring[i];
3c603136 9447 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
c0c050c5
MC
9448 }
9449 }
3c603136
JK
9450 /* Make sure napi polls see @dev_state change */
9451 synchronize_net();
132e0b65
EP
9452 /* Drop carrier first to prevent TX timeout */
9453 netif_carrier_off(bp->dev);
c0c050c5
MC
9454 /* Stop all TX queues */
9455 netif_tx_disable(bp->dev);
c0c050c5
MC
9456}
9457
7df4ae9f 9458void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
9459{
9460 int i;
c0c050c5 9461 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
9462
9463 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9464 txr = &bp->tx_ring[i];
3c603136 9465 WRITE_ONCE(txr->dev_state, 0);
c0c050c5 9466 }
3c603136
JK
9467 /* Make sure napi polls see @dev_state change */
9468 synchronize_net();
c0c050c5 9469 netif_tx_wake_all_queues(bp->dev);
0f5a4841 9470 if (BNXT_LINK_IS_UP(bp))
c0c050c5
MC
9471 netif_carrier_on(bp->dev);
9472}
9473
2046e3c3
MC
9474static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9475{
9476 u8 active_fec = link_info->active_fec_sig_mode &
9477 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9478
9479 switch (active_fec) {
9480 default:
9481 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9482 return "None";
9483 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9484 return "Clause 74 BaseR";
9485 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9486 return "Clause 91 RS(528,514)";
9487 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9488 return "Clause 91 RS544_1XN";
9489 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9490 return "Clause 91 RS(544,514)";
9491 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9492 return "Clause 91 RS272_1XN";
9493 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9494 return "Clause 91 RS(272,257)";
9495 }
9496}
9497
228ea8c1 9498void bnxt_report_link(struct bnxt *bp)
c0c050c5 9499{
0f5a4841 9500 if (BNXT_LINK_IS_UP(bp)) {
1d2deb61 9501 const char *signal = "";
c0c050c5 9502 const char *flow_ctrl;
1d2deb61 9503 const char *duplex;
38a21b34
DK
9504 u32 speed;
9505 u16 fec;
c0c050c5
MC
9506
9507 netif_carrier_on(bp->dev);
8eddb3e7
MC
9508 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9509 if (speed == SPEED_UNKNOWN) {
9510 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9511 return;
9512 }
c0c050c5
MC
9513 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9514 duplex = "full";
9515 else
9516 duplex = "half";
9517 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9518 flow_ctrl = "ON - receive & transmit";
9519 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9520 flow_ctrl = "ON - transmit";
9521 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9522 flow_ctrl = "ON - receive";
9523 else
9524 flow_ctrl = "none";
1d2deb61
EP
9525 if (bp->link_info.phy_qcfg_resp.option_flags &
9526 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9527 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9528 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9529 switch (sig_mode) {
9530 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9531 signal = "(NRZ) ";
9532 break;
9533 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9534 signal = "(PAM4) ";
9535 break;
9536 default:
9537 break;
9538 }
9539 }
9540 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9541 speed, signal, duplex, flow_ctrl);
b0d28207 9542 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
170ce013
MC
9543 netdev_info(bp->dev, "EEE is %s\n",
9544 bp->eee.eee_active ? "active" :
9545 "not active");
e70c752f
MC
9546 fec = bp->link_info.fec_cfg;
9547 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
2046e3c3 9548 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
e70c752f 9549 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
2046e3c3 9550 bnxt_report_fec(&bp->link_info));
c0c050c5
MC
9551 } else {
9552 netif_carrier_off(bp->dev);
9553 netdev_err(bp->dev, "NIC Link is Down\n");
9554 }
9555}
9556
3128e811
MC
9557static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9558{
9559 if (!resp->supported_speeds_auto_mode &&
9560 !resp->supported_speeds_force_mode &&
9561 !resp->supported_pam4_speeds_auto_mode &&
9562 !resp->supported_pam4_speeds_force_mode)
9563 return true;
9564 return false;
9565}
9566
170ce013
MC
9567static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9568{
93ed8117 9569 struct bnxt_link_info *link_info = &bp->link_info;
bbf33d1d
EP
9570 struct hwrm_port_phy_qcaps_output *resp;
9571 struct hwrm_port_phy_qcaps_input *req;
9572 int rc = 0;
170ce013
MC
9573
9574 if (bp->hwrm_spec_code < 0x10201)
9575 return 0;
9576
bbf33d1d
EP
9577 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9578 if (rc)
9579 return rc;
170ce013 9580
bbf33d1d
EP
9581 resp = hwrm_req_hold(bp, req);
9582 rc = hwrm_req_send(bp, req);
170ce013
MC
9583 if (rc)
9584 goto hwrm_phy_qcaps_exit;
9585
9a3bc77e 9586 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
acb20054 9587 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
9588 struct ethtool_eee *eee = &bp->eee;
9589 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9590
170ce013
MC
9591 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9592 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9593 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9594 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9595 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9596 }
fea6b333 9597
3128e811
MC
9598 if (bp->hwrm_spec_code >= 0x10a01) {
9599 if (bnxt_phy_qcaps_no_speed(resp)) {
9600 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9601 netdev_warn(bp->dev, "Ethernet link disabled\n");
9602 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9603 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9604 netdev_info(bp->dev, "Ethernet link enabled\n");
9605 /* Phy re-enabled, reprobe the speeds */
9606 link_info->support_auto_speeds = 0;
9607 link_info->support_pam4_auto_speeds = 0;
9608 }
9609 }
520ad89a
MC
9610 if (resp->supported_speeds_auto_mode)
9611 link_info->support_auto_speeds =
9612 le16_to_cpu(resp->supported_speeds_auto_mode);
d058426e
EP
9613 if (resp->supported_pam4_speeds_auto_mode)
9614 link_info->support_pam4_auto_speeds =
9615 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
170ce013 9616
d5430d31
MC
9617 bp->port_count = resp->port_cnt;
9618
170ce013 9619hwrm_phy_qcaps_exit:
bbf33d1d 9620 hwrm_req_drop(bp, req);
170ce013
MC
9621 return rc;
9622}
9623
c916062a
EP
9624static bool bnxt_support_dropped(u16 advertising, u16 supported)
9625{
9626 u16 diff = advertising ^ supported;
9627
9628 return ((supported | diff) != supported);
9629}
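/* The XOR test above is equivalent to (advertising & ~supported) != 0:
 * e.g. advertising = 0x5, supported = 0x4 reports a dropped speed because
 * bit 0 is advertised but no longer supported.
 */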
9630
ccd6a9dc 9631int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
c0c050c5 9632{
c0c050c5 9633 struct bnxt_link_info *link_info = &bp->link_info;
bbf33d1d
EP
9634 struct hwrm_port_phy_qcfg_output *resp;
9635 struct hwrm_port_phy_qcfg_input *req;
0f5a4841 9636 u8 link_state = link_info->link_state;
d058426e 9637 bool support_changed = false;
bbf33d1d 9638 int rc;
c0c050c5 9639
bbf33d1d
EP
9640 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9641 if (rc)
9642 return rc;
c0c050c5 9643
bbf33d1d
EP
9644 resp = hwrm_req_hold(bp, req);
9645 rc = hwrm_req_send(bp, req);
c0c050c5 9646 if (rc) {
bbf33d1d 9647 hwrm_req_drop(bp, req);
662c9b22
EP
9648 if (BNXT_VF(bp) && rc == -ENODEV) {
9649 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9650 rc = 0;
9651 }
c0c050c5
MC
9652 return rc;
9653 }
9654
9655 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9656 link_info->phy_link_status = resp->link;
acb20054
MC
9657 link_info->duplex = resp->duplex_cfg;
9658 if (bp->hwrm_spec_code >= 0x10800)
9659 link_info->duplex = resp->duplex_state;
c0c050c5
MC
9660 link_info->pause = resp->pause;
9661 link_info->auto_mode = resp->auto_mode;
9662 link_info->auto_pause_setting = resp->auto_pause;
3277360e 9663 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 9664 link_info->force_pause_setting = resp->force_pause;
acb20054 9665 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
9666 if (link_info->phy_link_status == BNXT_LINK_LINK)
9667 link_info->link_speed = le16_to_cpu(resp->link_speed);
9668 else
9669 link_info->link_speed = 0;
9670 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
d058426e
EP
9671 link_info->force_pam4_link_speed =
9672 le16_to_cpu(resp->force_pam4_link_speed);
c0c050c5 9673 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
d058426e 9674 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
c0c050c5 9675 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
d058426e
EP
9676 link_info->auto_pam4_link_speeds =
9677 le16_to_cpu(resp->auto_pam4_link_speed_mask);
3277360e
MC
9678 link_info->lp_auto_link_speeds =
9679 le16_to_cpu(resp->link_partner_adv_speeds);
d058426e
EP
9680 link_info->lp_auto_pam4_link_speeds =
9681 resp->link_partner_pam4_adv_speeds;
c0c050c5
MC
9682 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9683 link_info->phy_ver[0] = resp->phy_maj;
9684 link_info->phy_ver[1] = resp->phy_min;
9685 link_info->phy_ver[2] = resp->phy_bld;
9686 link_info->media_type = resp->media_type;
03efbec0 9687 link_info->phy_type = resp->phy_type;
11f15ed3 9688 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
9689 link_info->phy_addr = resp->eee_config_phy_addr &
9690 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 9691 link_info->module_status = resp->module_status;
170ce013 9692
b0d28207 9693 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
170ce013
MC
9694 struct ethtool_eee *eee = &bp->eee;
9695 u16 fw_speeds;
9696
9697 eee->eee_active = 0;
9698 if (resp->eee_config_phy_addr &
9699 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9700 eee->eee_active = 1;
9701 fw_speeds = le16_to_cpu(
9702 resp->link_partner_adv_eee_link_speed_mask);
9703 eee->lp_advertised =
9704 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9705 }
9706
9707 /* Pull initial EEE config */
9708 if (!chng_link_state) {
9709 if (resp->eee_config_phy_addr &
9710 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9711 eee->eee_enabled = 1;
c0c050c5 9712
170ce013
MC
9713 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9714 eee->advertised =
9715 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9716
9717 if (resp->eee_config_phy_addr &
9718 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9719 __le32 tmr;
9720
9721 eee->tx_lpi_enabled = 1;
9722 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9723 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9724 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9725 }
9726 }
9727 }
e70c752f
MC
9728
9729 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8b277589 9730 if (bp->hwrm_spec_code >= 0x10504) {
e70c752f 9731 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8b277589
MC
9732 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9733 }
c0c050c5
MC
9734 /* TODO: need to add more logic to report VF link */
9735 if (chng_link_state) {
9736 if (link_info->phy_link_status == BNXT_LINK_LINK)
0f5a4841 9737 link_info->link_state = BNXT_LINK_STATE_UP;
c0c050c5 9738 else
0f5a4841
EP
9739 link_info->link_state = BNXT_LINK_STATE_DOWN;
9740 if (link_state != link_info->link_state)
c0c050c5
MC
9741 bnxt_report_link(bp);
9742 } else {
0f5a4841
EP
9743		/* always report link down if not required to update link state */
9744 link_info->link_state = BNXT_LINK_STATE_DOWN;
c0c050c5 9745 }
bbf33d1d 9746 hwrm_req_drop(bp, req);
286ef9d6 9747
c7e457f4 9748 if (!BNXT_PHY_CFG_ABLE(bp))
dac04907
MC
9749 return 0;
9750
c916062a
EP
9751 /* Check if any advertised speeds are no longer supported. The caller
9752 * holds the link_lock mutex, so we can modify link_info settings.
9753 */
9754 if (bnxt_support_dropped(link_info->advertising,
9755 link_info->support_auto_speeds)) {
286ef9d6 9756 link_info->advertising = link_info->support_auto_speeds;
d058426e 9757 support_changed = true;
286ef9d6 9758 }
d058426e
EP
9759 if (bnxt_support_dropped(link_info->advertising_pam4,
9760 link_info->support_pam4_auto_speeds)) {
9761 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9762 support_changed = true;
9763 }
9764 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9765 bnxt_hwrm_set_link_setting(bp, true, false);
c0c050c5
MC
9766 return 0;
9767}
9768
10289bec
MC
9769static void bnxt_get_port_module_status(struct bnxt *bp)
9770{
9771 struct bnxt_link_info *link_info = &bp->link_info;
9772 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9773 u8 module_status;
9774
9775 if (bnxt_update_link(bp, true))
9776 return;
9777
9778 module_status = link_info->module_status;
9779 switch (module_status) {
9780 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9781 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9782 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9783 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9784 bp->pf.port_id);
9785 if (bp->hwrm_spec_code >= 0x10201) {
9786 netdev_warn(bp->dev, "Module part number %s\n",
9787 resp->phy_vendor_partnumber);
9788 }
9789 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9790 netdev_warn(bp->dev, "TX is disabled\n");
9791 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9792 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9793 }
9794}
9795
c0c050c5
MC
9796static void
9797bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9798{
9799 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
9800 if (bp->hwrm_spec_code >= 0x10201)
9801 req->auto_pause =
9802 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
9803 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9804 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9805 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 9806 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
9807 req->enables |=
9808 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9809 } else {
9810 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9811 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9812 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9813 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9814 req->enables |=
9815 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
9816 if (bp->hwrm_spec_code >= 0x10201) {
9817 req->auto_pause = req->force_pause;
9818 req->enables |= cpu_to_le32(
9819 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9820 }
c0c050c5
MC
9821 }
9822}
9823
d058426e 9824static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
c0c050c5 9825{
d058426e
EP
9826 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9827 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9828 if (bp->link_info.advertising) {
9829 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9830 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9831 }
9832 if (bp->link_info.advertising_pam4) {
9833 req->enables |=
9834 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9835 req->auto_link_pam4_speed_mask =
9836 cpu_to_le16(bp->link_info.advertising_pam4);
9837 }
c0c050c5 9838 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
d058426e 9839 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
c0c050c5 9840 } else {
c0c050c5 9841 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
d058426e
EP
9842 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9843 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9844 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9845 } else {
9846 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9847 }
c0c050c5
MC
9848 }
9849
c0c050c5
MC
9850 /* tell chimp that the setting takes effect immediately */
9851 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9852}
9853
9854int bnxt_hwrm_set_pause(struct bnxt *bp)
9855{
bbf33d1d 9856 struct hwrm_port_phy_cfg_input *req;
c0c050c5
MC
9857 int rc;
9858
bbf33d1d
EP
9859 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9860 if (rc)
9861 return rc;
9862
9863 bnxt_hwrm_set_pause_common(bp, req);
c0c050c5
MC
9864
9865 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9866 bp->link_info.force_link_chng)
bbf33d1d 9867 bnxt_hwrm_set_link_common(bp, req);
c0c050c5 9868
bbf33d1d 9869 rc = hwrm_req_send(bp, req);
c0c050c5
MC
9870 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9871		/* since changing the pause setting doesn't trigger any link
9872		 * change event, the driver needs to update the current pause
9873		 * result upon successful return of the phy_cfg command
9874 */
9875 bp->link_info.pause =
9876 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9877 bp->link_info.auto_pause_setting = 0;
9878 if (!bp->link_info.force_link_chng)
9879 bnxt_report_link(bp);
9880 }
9881 bp->link_info.force_link_chng = false;
c0c050c5
MC
9882 return rc;
9883}
9884
939f7f0c
MC
9885static void bnxt_hwrm_set_eee(struct bnxt *bp,
9886 struct hwrm_port_phy_cfg_input *req)
9887{
9888 struct ethtool_eee *eee = &bp->eee;
9889
9890 if (eee->eee_enabled) {
9891 u16 eee_speeds;
9892 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9893
9894 if (eee->tx_lpi_enabled)
9895 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9896 else
9897 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9898
9899 req->flags |= cpu_to_le32(flags);
9900 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9901 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9902 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9903 } else {
9904 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9905 }
9906}
9907
9908int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5 9909{
bbf33d1d
EP
9910 struct hwrm_port_phy_cfg_input *req;
9911 int rc;
9912
9913 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9914 if (rc)
9915 return rc;
c0c050c5 9916
c0c050c5 9917 if (set_pause)
bbf33d1d 9918 bnxt_hwrm_set_pause_common(bp, req);
c0c050c5 9919
bbf33d1d 9920 bnxt_hwrm_set_link_common(bp, req);
939f7f0c
MC
9921
9922 if (set_eee)
bbf33d1d
EP
9923 bnxt_hwrm_set_eee(bp, req);
9924 return hwrm_req_send(bp, req);
c0c050c5
MC
9925}
9926
33f7d55f
MC
9927static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9928{
bbf33d1d
EP
9929 struct hwrm_port_phy_cfg_input *req;
9930 int rc;
33f7d55f 9931
567b2abe 9932 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
9933 return 0;
9934
d5ca9905
MC
9935 if (pci_num_vf(bp->pdev) &&
9936 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
33f7d55f
MC
9937 return 0;
9938
bbf33d1d
EP
9939 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9940 if (rc)
9941 return rc;
9942
9943 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
0f5a4841
EP
9944 rc = hwrm_req_send(bp, req);
9945 if (!rc) {
9946 mutex_lock(&bp->link_lock);
9947		/* Device is not obliged to take the link down in certain scenarios, even
9948 * when forced. Setting the state unknown is consistent with
9949 * driver startup and will force link state to be reported
9950 * during subsequent open based on PORT_PHY_QCFG.
9951 */
9952 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9953 mutex_unlock(&bp->link_lock);
9954 }
9955 return rc;
33f7d55f
MC
9956}
9957
b187e4ba
EP
9958static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9959{
9960#ifdef CONFIG_TEE_BNXT_FW
9961 int rc = tee_bnxt_fw_load();
9962
9963 if (rc)
9964 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9965
9966 return rc;
9967#else
9968 netdev_err(bp->dev, "OP-TEE not supported\n");
9969 return -ENODEV;
9970#endif
9971}
9972
9973static int bnxt_try_recover_fw(struct bnxt *bp)
9974{
9975 if (bp->fw_health && bp->fw_health->status_reliable) {
d1cbd165
MC
9976 int retry = 0, rc;
9977 u32 sts;
9978
d1cbd165 9979 do {
d1cbd165 9980 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7b370ad7 9981 rc = bnxt_hwrm_poll(bp);
17e1be34
MC
9982 if (!BNXT_FW_IS_BOOTING(sts) &&
9983 !BNXT_FW_IS_RECOVERING(sts))
d1cbd165
MC
9984 break;
9985 retry++;
9986 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
b187e4ba 9987
d1cbd165
MC
9988 if (!BNXT_FW_IS_HEALTHY(sts)) {
9989 netdev_err(bp->dev,
9990 "Firmware not responding, status: 0x%x\n",
9991 sts);
9992 rc = -ENODEV;
9993 }
b187e4ba
EP
9994 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9995 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9996 return bnxt_fw_reset_via_optee(bp);
9997 }
d1cbd165 9998 return rc;
b187e4ba
EP
9999 }
10000
10001 return -ENODEV;
10002}
10003
b4c66425 10004static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
d900aadd
EP
10005{
10006 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
d900aadd
EP
10007
10008 if (!BNXT_NEW_RM(bp))
b4c66425 10009 return; /* no resource reservations required */
d900aadd
EP
10010
10011 hw_resc->resv_cp_rings = 0;
10012 hw_resc->resv_stat_ctxs = 0;
10013 hw_resc->resv_irqs = 0;
10014 hw_resc->resv_tx_rings = 0;
10015 hw_resc->resv_rx_rings = 0;
10016 hw_resc->resv_hw_ring_grps = 0;
10017 hw_resc->resv_vnics = 0;
10018 if (!fw_reset) {
10019 bp->tx_nr_rings = 0;
10020 bp->rx_nr_rings = 0;
10021 }
b4c66425
VG
10022}
10023
10024int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
10025{
10026 int rc;
10027
10028 if (!BNXT_NEW_RM(bp))
10029 return 0; /* no resource reservations required */
10030
10031 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
10032 if (rc)
10033 netdev_err(bp->dev, "resc_qcaps failed\n");
10034
10035 bnxt_clear_reservations(bp, fw_reset);
d900aadd
EP
10036
10037 return rc;
10038}
10039
25e1acd6
MC
10040static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
10041{
bbf33d1d
EP
10042 struct hwrm_func_drv_if_change_output *resp;
10043 struct hwrm_func_drv_if_change_input *req;
20d7d1c5
EP
10044 bool fw_reset = !bp->irq_tbl;
10045 bool resc_reinit = false;
5d06eb5c 10046 int rc, retry = 0;
ec5d31e3 10047 u32 flags = 0;
25e1acd6
MC
10048
10049 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
10050 return 0;
10051
bbf33d1d
EP
10052 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
10053 if (rc)
10054 return rc;
10055
25e1acd6 10056 if (up)
bbf33d1d
EP
10057 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
10058 resp = hwrm_req_hold(bp, req);
10059
10060 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
5d06eb5c 10061 while (retry < BNXT_FW_IF_RETRY) {
bbf33d1d 10062 rc = hwrm_req_send(bp, req);
5d06eb5c
VV
10063 if (rc != -EAGAIN)
10064 break;
10065
10066 msleep(50);
10067 retry++;
10068 }
5d06eb5c 10069
bbf33d1d
EP
10070 if (rc == -EAGAIN) {
10071 hwrm_req_drop(bp, req);
5d06eb5c 10072 return rc;
bbf33d1d
EP
10073 } else if (!rc) {
10074 flags = le32_to_cpu(resp->flags);
10075 } else if (up) {
b187e4ba
EP
10076 rc = bnxt_try_recover_fw(bp);
10077 fw_reset = true;
10078 }
bbf33d1d 10079 hwrm_req_drop(bp, req);
ec5d31e3
MC
10080 if (rc)
10081 return rc;
25e1acd6 10082
43a440c4
MC
10083 if (!up) {
10084 bnxt_inv_fw_health_reg(bp);
ec5d31e3 10085 return 0;
43a440c4 10086 }
25e1acd6 10087
ec5d31e3
MC
10088 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
10089 resc_reinit = true;
4279414b
MC
10090 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
10091 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
ec5d31e3 10092 fw_reset = true;
0e0e3c53
KA
10093 else
10094 bnxt_remap_fw_health_regs(bp);
ec5d31e3 10095
3bc7d4a3
MC
10096 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
10097 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
20d7d1c5 10098 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
3bc7d4a3
MC
10099 return -ENODEV;
10100 }
ec5d31e3
MC
10101 if (resc_reinit || fw_reset) {
10102 if (fw_reset) {
2924ad95 10103 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
f3a6d206
VV
10104 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10105 bnxt_ulp_stop(bp);
325f85f3
MC
10106 bnxt_free_ctx_mem(bp);
10107 kfree(bp->ctx);
10108 bp->ctx = NULL;
843d699d 10109 bnxt_dcb_free(bp);
ec5d31e3
MC
10110 rc = bnxt_fw_init_one(bp);
10111 if (rc) {
2924ad95 10112 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
10113 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10114 return rc;
10115 }
10116 bnxt_clear_int_mode(bp);
10117 rc = bnxt_init_int_mode(bp);
10118 if (rc) {
2924ad95 10119 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
10120 netdev_err(bp->dev, "init int mode failed\n");
10121 return rc;
10122 }
ec5d31e3 10123 }
d900aadd 10124 rc = bnxt_cancel_reservations(bp, fw_reset);
25e1acd6 10125 }
15a7deb8 10126 return rc;
25e1acd6
MC
10127}
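/* Summary of the up path above: HWRM_FUNC_DRV_IF_CHANGE tells firmware the
 * driver interface is coming up.  RESC_CHANGE in the response means the
 * resource reservations must be redone; HOT_FW_RESET_DONE (or a detected
 * reset) means firmware restarted while the interface was down, so context
 * memory, DCB state and interrupts are rebuilt before open continues.
 */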
10128
5ad2cbee
MC
10129static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
10130{
bbf33d1d
EP
10131 struct hwrm_port_led_qcaps_output *resp;
10132 struct hwrm_port_led_qcaps_input *req;
5ad2cbee
MC
10133 struct bnxt_pf_info *pf = &bp->pf;
10134 int rc;
10135
ba642ab7 10136 bp->num_leds = 0;
5ad2cbee
MC
10137 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
10138 return 0;
10139
bbf33d1d
EP
10140 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
10141 if (rc)
10142 return rc;
10143
10144 req->port_id = cpu_to_le16(pf->port_id);
10145 resp = hwrm_req_hold(bp, req);
10146 rc = hwrm_req_send(bp, req);
5ad2cbee 10147 if (rc) {
bbf33d1d 10148 hwrm_req_drop(bp, req);
5ad2cbee
MC
10149 return rc;
10150 }
10151 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
10152 int i;
10153
10154 bp->num_leds = resp->num_leds;
10155 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
10156 bp->num_leds);
10157 for (i = 0; i < bp->num_leds; i++) {
10158 struct bnxt_led_info *led = &bp->leds[i];
10159 __le16 caps = led->led_state_caps;
10160
10161 if (!led->led_group_id ||
10162 !BNXT_LED_ALT_BLINK_CAP(caps)) {
10163 bp->num_leds = 0;
10164 break;
10165 }
10166 }
10167 }
bbf33d1d 10168 hwrm_req_drop(bp, req);
5ad2cbee
MC
10169 return 0;
10170}
10171
5282db6c
MC
10172int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
10173{
bbf33d1d
EP
10174 struct hwrm_wol_filter_alloc_output *resp;
10175 struct hwrm_wol_filter_alloc_input *req;
5282db6c
MC
10176 int rc;
10177
bbf33d1d
EP
10178 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10179 if (rc)
10180 return rc;
10181
10182 req->port_id = cpu_to_le16(bp->pf.port_id);
10183 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
10184 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
10185 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10186
10187 resp = hwrm_req_hold(bp, req);
10188 rc = hwrm_req_send(bp, req);
5282db6c
MC
10189 if (!rc)
10190 bp->wol_filter_id = resp->wol_filter_id;
bbf33d1d 10191 hwrm_req_drop(bp, req);
5282db6c
MC
10192 return rc;
10193}
10194
10195int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10196{
bbf33d1d
EP
10197 struct hwrm_wol_filter_free_input *req;
10198 int rc;
5282db6c 10199
bbf33d1d
EP
10200 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10201 if (rc)
10202 return rc;
10203
10204 req->port_id = cpu_to_le16(bp->pf.port_id);
10205 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
10206 req->wol_filter_id = bp->wol_filter_id;
10207
10208 return hwrm_req_send(bp, req);
5282db6c
MC
10209}
10210
c1ef146a
MC
10211static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10212{
bbf33d1d
EP
10213 struct hwrm_wol_filter_qcfg_output *resp;
10214 struct hwrm_wol_filter_qcfg_input *req;
c1ef146a
MC
10215 u16 next_handle = 0;
10216 int rc;
10217
bbf33d1d
EP
10218 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10219 if (rc)
10220 return rc;
10221
10222 req->port_id = cpu_to_le16(bp->pf.port_id);
10223 req->handle = cpu_to_le16(handle);
10224 resp = hwrm_req_hold(bp, req);
10225 rc = hwrm_req_send(bp, req);
c1ef146a
MC
10226 if (!rc) {
10227 next_handle = le16_to_cpu(resp->next_handle);
10228 if (next_handle != 0) {
10229 if (resp->wol_type ==
10230 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10231 bp->wol = 1;
10232 bp->wol_filter_id = resp->wol_filter_id;
10233 }
10234 }
10235 }
bbf33d1d 10236 hwrm_req_drop(bp, req);
c1ef146a
MC
10237 return next_handle;
10238}
10239
10240static void bnxt_get_wol_settings(struct bnxt *bp)
10241{
10242 u16 handle = 0;
10243
ba642ab7 10244 bp->wol = 0;
c1ef146a
MC
10245 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10246 return;
10247
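	/* Walk the firmware's WOL filter list; a returned next handle of
	 * 0 or 0xffff ends the walk.
	 */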
10248 do {
10249 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10250 } while (handle && handle != 0xffff);
10251}
10252
cde49a42
VV
10253#ifdef CONFIG_BNXT_HWMON
10254static ssize_t bnxt_show_temp(struct device *dev,
10255 struct device_attribute *devattr, char *buf)
10256{
cde49a42 10257 struct hwrm_temp_monitor_query_output *resp;
bbf33d1d 10258 struct hwrm_temp_monitor_query_input *req;
cde49a42 10259 struct bnxt *bp = dev_get_drvdata(dev);
12cce90b 10260 u32 len = 0;
d69753fa 10261 int rc;
cde49a42 10262
bbf33d1d
EP
10263 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10264 if (rc)
10265 return rc;
10266 resp = hwrm_req_hold(bp, req);
10267 rc = hwrm_req_send(bp, req);
d69753fa 10268 if (!rc)
12cce90b 10269 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
bbf33d1d 10270 hwrm_req_drop(bp, req);
27537929
DC
10271 if (rc)
10272 return rc;
10273 return len;
cde49a42
VV
10274}
10275static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10276
10277static struct attribute *bnxt_attrs[] = {
10278 &sensor_dev_attr_temp1_input.dev_attr.attr,
10279 NULL
10280};
10281ATTRIBUTE_GROUPS(bnxt);
10282
10283static void bnxt_hwmon_close(struct bnxt *bp)
10284{
10285 if (bp->hwmon_dev) {
10286 hwmon_device_unregister(bp->hwmon_dev);
10287 bp->hwmon_dev = NULL;
10288 }
10289}
10290
10291static void bnxt_hwmon_open(struct bnxt *bp)
10292{
bbf33d1d 10293 struct hwrm_temp_monitor_query_input *req;
cde49a42 10294 struct pci_dev *pdev = bp->pdev;
d69753fa
EP
10295 int rc;
10296
bbf33d1d
EP
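	/* Probe for temperature sensor support first; if the firmware denies
	 * or does not support the query, tear down any existing hwmon device.
	 */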
10297 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10298 if (!rc)
10299 rc = hwrm_req_send_silent(bp, req);
d69753fa
EP
10300 if (rc == -EACCES || rc == -EOPNOTSUPP) {
10301 bnxt_hwmon_close(bp);
10302 return;
10303 }
cde49a42 10304
ba642ab7
MC
10305 if (bp->hwmon_dev)
10306 return;
10307
cde49a42
VV
10308 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10309 DRV_MODULE_NAME, bp,
10310 bnxt_groups);
10311 if (IS_ERR(bp->hwmon_dev)) {
10312 bp->hwmon_dev = NULL;
10313 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10314 }
10315}
10316#else
10317static void bnxt_hwmon_close(struct bnxt *bp)
10318{
10319}
10320
10321static void bnxt_hwmon_open(struct bnxt *bp)
10322{
10323}
10324#endif
10325
939f7f0c
MC
10326static bool bnxt_eee_config_ok(struct bnxt *bp)
10327{
10328 struct ethtool_eee *eee = &bp->eee;
10329 struct bnxt_link_info *link_info = &bp->link_info;
10330
b0d28207 10331 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
939f7f0c
MC
10332 return true;
10333
10334 if (eee->eee_enabled) {
10335 u32 advertising =
10336 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10337
10338 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10339 eee->eee_enabled = 0;
10340 return false;
10341 }
10342 if (eee->advertised & ~advertising) {
10343 eee->advertised = advertising & eee->supported;
10344 return false;
10345 }
10346 }
10347 return true;
10348}
10349
c0c050c5
MC
10350static int bnxt_update_phy_setting(struct bnxt *bp)
10351{
10352 int rc;
10353 bool update_link = false;
10354 bool update_pause = false;
939f7f0c 10355 bool update_eee = false;
c0c050c5
MC
10356 struct bnxt_link_info *link_info = &bp->link_info;
10357
10358 rc = bnxt_update_link(bp, true);
10359 if (rc) {
10360 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10361 rc);
10362 return rc;
10363 }
33dac24a
MC
10364 if (!BNXT_SINGLE_PF(bp))
10365 return 0;
10366
c0c050c5 10367 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
10368 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10369 link_info->req_flow_ctrl)
c0c050c5
MC
10370 update_pause = true;
10371 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10372 link_info->force_pause_setting != link_info->req_flow_ctrl)
10373 update_pause = true;
c0c050c5
MC
10374 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10375 if (BNXT_AUTO_MODE(link_info->auto_mode))
10376 update_link = true;
d058426e
EP
10377 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10378 link_info->req_link_speed != link_info->force_link_speed)
10379 update_link = true;
10380 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10381 link_info->req_link_speed != link_info->force_pam4_link_speed)
c0c050c5 10382 update_link = true;
de73018f
MC
10383 if (link_info->req_duplex != link_info->duplex_setting)
10384 update_link = true;
c0c050c5
MC
10385 } else {
10386 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10387 update_link = true;
d058426e
EP
10388 if (link_info->advertising != link_info->auto_link_speeds ||
10389 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
c0c050c5 10390 update_link = true;
c0c050c5
MC
10391 }
10392
16d663a6
MC
10393 /* The last close may have shutdown the link, so need to call
10394 * PHY_CFG to bring it back up.
10395 */
0f5a4841 10396 if (!BNXT_LINK_IS_UP(bp))
16d663a6
MC
10397 update_link = true;
10398
939f7f0c
MC
10399 if (!bnxt_eee_config_ok(bp))
10400 update_eee = true;
10401
c0c050c5 10402 if (update_link)
939f7f0c 10403 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
10404 else if (update_pause)
10405 rc = bnxt_hwrm_set_pause(bp);
10406 if (rc) {
10407 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10408 rc);
10409 return rc;
10410 }
10411
10412 return rc;
10413}
10414
11809490
JH
10415/* Common routine to pre-map certain register blocks to a different GRC
10416 * window. A PF has 16 4K windows and a VF has 4 4K windows. However, only
10417 * 15 windows in the PF and 3 windows in the VF can be customized to map
10418 * different register blocks.
10419 */
10420static void bnxt_preset_reg_win(struct bnxt *bp)
10421{
10422 if (BNXT_PF(bp)) {
10423 /* CAG registers map to GRC window #4 */
10424 writel(BNXT_CAG_REG_BASE,
10425 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10426 }
10427}
10428
47558acd
MC
10429static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10430
6882c36c
EP
10431static int bnxt_reinit_after_abort(struct bnxt *bp)
10432{
10433 int rc;
10434
10435 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10436 return -EBUSY;
10437
d20cd745
VV
10438 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10439 return -ENODEV;
10440
6882c36c
EP
10441 rc = bnxt_fw_init_one(bp);
10442 if (!rc) {
10443 bnxt_clear_int_mode(bp);
10444 rc = bnxt_init_int_mode(bp);
10445 if (!rc) {
10446 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10447 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10448 }
10449 }
10450 return rc;
10451}
10452
c0c050c5
MC
10453static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10454{
10455 int rc = 0;
10456
11809490 10457 bnxt_preset_reg_win(bp);
c0c050c5
MC
10458 netif_carrier_off(bp->dev);
10459 if (irq_re_init) {
47558acd
MC
10460 /* Reserve rings now if none were reserved at driver probe. */
10461 rc = bnxt_init_dflt_ring_mode(bp);
10462 if (rc) {
10463 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10464 return rc;
10465 }
c0c050c5 10466 }
1b3f0b75 10467 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
10468 if (rc)
10469 return rc;
c0c050c5
MC
10470 if ((bp->flags & BNXT_FLAG_RFS) &&
10471 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10472 /* disable RFS if falling back to INTA */
10473 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10474 bp->flags &= ~BNXT_FLAG_RFS;
10475 }
10476
10477 rc = bnxt_alloc_mem(bp, irq_re_init);
10478 if (rc) {
10479 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10480 goto open_err_free_mem;
10481 }
10482
10483 if (irq_re_init) {
10484 bnxt_init_napi(bp);
10485 rc = bnxt_request_irq(bp);
10486 if (rc) {
10487 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 10488 goto open_err_irq;
c0c050c5
MC
10489 }
10490 }
10491
c0c050c5
MC
10492 rc = bnxt_init_nic(bp, irq_re_init);
10493 if (rc) {
10494 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
96ecdcc9 10495 goto open_err_irq;
c0c050c5
MC
10496 }
10497
96ecdcc9
JK
10498 bnxt_enable_napi(bp);
10499 bnxt_debug_dev_init(bp);
10500
c0c050c5 10501 if (link_re_init) {
e2dc9b6e 10502 mutex_lock(&bp->link_lock);
c0c050c5 10503 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 10504 mutex_unlock(&bp->link_lock);
a1ef4a79 10505 if (rc) {
ba41d46f 10506 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
10507 if (BNXT_SINGLE_PF(bp)) {
10508 bp->link_info.phy_retry = true;
10509 bp->link_info.phy_retry_expires =
10510 jiffies + 5 * HZ;
10511 }
10512 }
c0c050c5
MC
10513 }
10514
7cdd5fc3 10515 if (irq_re_init)
442a35a5 10516 udp_tunnel_nic_reset_ntf(bp->dev);
c0c050c5 10517
4f81def2
PC
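	/* With fewer XDP TX rings than possible CPUs, a ring may be shared by
	 * several CPUs, so enable the static key that makes XDP TX take a
	 * per-ring lock.
	 */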
10518 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
10519 if (!static_key_enabled(&bnxt_xdp_locking_key))
10520 static_branch_enable(&bnxt_xdp_locking_key);
10521 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
10522 static_branch_disable(&bnxt_xdp_locking_key);
10523 }
caefe526 10524 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
10525 bnxt_enable_int(bp);
10526 /* Enable TX queues */
10527 bnxt_tx_enable(bp);
10528 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec 10529 /* Poll link status and check SFP+ module status */
3c10ed49 10530 mutex_lock(&bp->link_lock);
10289bec 10531 bnxt_get_port_module_status(bp);
3c10ed49 10532 mutex_unlock(&bp->link_lock);
c0c050c5 10533
ee5c7fb3
SP
10534 /* VF-reps may need to be re-opened after the PF is re-opened */
10535 if (BNXT_PF(bp))
10536 bnxt_vf_reps_open(bp);
24ac1ecd 10537 bnxt_ptp_init_rtc(bp, true);
11862689 10538 bnxt_ptp_cfg_tstamp_filters(bp);
c0c050c5
MC
10539 return 0;
10540
c58387ab 10541open_err_irq:
c0c050c5
MC
10542 bnxt_del_napi(bp);
10543
10544open_err_free_mem:
10545 bnxt_free_skbs(bp);
10546 bnxt_free_irq(bp);
10547 bnxt_free_mem(bp, true);
10548 return rc;
10549}
10550
10551/* rtnl_lock held */
10552int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10553{
10554 int rc = 0;
10555
a1301f08
MC
10556 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10557 rc = -EIO;
10558 if (!rc)
10559 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10560 if (rc) {
10561 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10562 dev_close(bp->dev);
10563 }
10564 return rc;
10565}
10566
f7dc1ea6
MC
10567/* rtnl_lock held, open the NIC halfway by allocating all resources, but
10568 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10569 * self tests.
10570 */
10571int bnxt_half_open_nic(struct bnxt *bp)
10572{
10573 int rc = 0;
10574
11a39259
SK
10575 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10576 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10577 rc = -ENODEV;
10578 goto half_open_err;
10579 }
10580
6758f937 10581 rc = bnxt_alloc_mem(bp, true);
f7dc1ea6
MC
10582 if (rc) {
10583 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10584 goto half_open_err;
10585 }
cfcab3b3 10586 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
6758f937 10587 rc = bnxt_init_nic(bp, true);
f7dc1ea6 10588 if (rc) {
cfcab3b3 10589 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
f7dc1ea6
MC
10590 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10591 goto half_open_err;
10592 }
10593 return 0;
10594
10595half_open_err:
10596 bnxt_free_skbs(bp);
6758f937 10597 bnxt_free_mem(bp, true);
f7dc1ea6
MC
10598 dev_close(bp->dev);
10599 return rc;
10600}
10601
10602/* rtnl_lock held, this call can only be made after a previous successful
10603 * call to bnxt_half_open_nic().
10604 */
10605void bnxt_half_close_nic(struct bnxt *bp)
10606{
6758f937 10607 bnxt_hwrm_resource_free(bp, false, true);
f7dc1ea6 10608 bnxt_free_skbs(bp);
6758f937 10609 bnxt_free_mem(bp, true);
cfcab3b3 10610 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
f7dc1ea6
MC
10611}
10612
228ea8c1 10613void bnxt_reenable_sriov(struct bnxt *bp)
c16d4ee0
MC
10614{
10615 if (BNXT_PF(bp)) {
10616 struct bnxt_pf_info *pf = &bp->pf;
10617 int n = pf->active_vfs;
10618
10619 if (n)
10620 bnxt_cfg_hw_sriov(bp, &n, true);
10621 }
10622}
10623
c0c050c5
MC
10624static int bnxt_open(struct net_device *dev)
10625{
10626 struct bnxt *bp = netdev_priv(dev);
25e1acd6 10627 int rc;
c0c050c5 10628
ec5d31e3 10629 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
6882c36c
EP
10630 rc = bnxt_reinit_after_abort(bp);
10631 if (rc) {
10632 if (rc == -EBUSY)
10633 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10634 else
10635 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10636 return -ENODEV;
10637 }
ec5d31e3
MC
10638 }
10639
10640 rc = bnxt_hwrm_if_change(bp, true);
25e1acd6 10641 if (rc)
ec5d31e3 10642 return rc;
d7859afb 10643
ec5d31e3
MC
10644 rc = __bnxt_open_nic(bp, true, true);
10645 if (rc) {
25e1acd6 10646 bnxt_hwrm_if_change(bp, false);
ec5d31e3 10647 } else {
f3a6d206 10648 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12de2ead 10649 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
f3a6d206 10650 bnxt_ulp_start(bp, 0);
12de2ead
MC
10651 bnxt_reenable_sriov(bp);
10652 }
ec5d31e3
MC
10653 }
10654 bnxt_hwmon_open(bp);
10655 }
cde49a42 10656
25e1acd6 10657 return rc;
c0c050c5
MC
10658}
10659
f9b76ebd
MC
10660static bool bnxt_drv_busy(struct bnxt *bp)
10661{
10662 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10663 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10664}
10665
b8875ca3
MC
10666static void bnxt_get_ring_stats(struct bnxt *bp,
10667 struct rtnl_link_stats64 *stats);
10668
86e953db
MC
10669static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10670 bool link_re_init)
c0c050c5 10671{
ee5c7fb3
SP
10672 /* Close the VF-reps before closing PF */
10673 if (BNXT_PF(bp))
10674 bnxt_vf_reps_close(bp);
86e953db 10675
c0c050c5
MC
10676 /* Change device state to avoid TX queue wake-ups */
10677 bnxt_tx_disable(bp);
10678
caefe526 10679 clear_bit(BNXT_STATE_OPEN, &bp->state);
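	/* Make sure the OPEN bit is cleared before checking for in-progress
	 * readers such as bnxt_get_stats64().
	 */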
4cebdcec 10680 smp_mb__after_atomic();
f9b76ebd 10681 while (bnxt_drv_busy(bp))
4cebdcec 10682 msleep(20);
c0c050c5 10683
c909e7ca 10684 /* Flush rings and disable interrupts */
c0c050c5
MC
10685 bnxt_shutdown_nic(bp, irq_re_init);
10686
10687 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10688
cabfb09d 10689 bnxt_debug_dev_exit(bp);
c0c050c5 10690 bnxt_disable_napi(bp);
c0c050c5
MC
10691 del_timer_sync(&bp->timer);
10692 bnxt_free_skbs(bp);
10693
b8875ca3 10694 /* Save ring stats before shutdown */
4c70dbe3 10695 if (bp->bnapi && irq_re_init) {
b8875ca3 10696 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
4c70dbe3
MC
10697 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
10698 }
c0c050c5
MC
10699 if (irq_re_init) {
10700 bnxt_free_irq(bp);
10701 bnxt_del_napi(bp);
10702 }
10703 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
10704}
10705
10706int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10707{
10708 int rc = 0;
10709
3bc7d4a3
MC
10710 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10711 /* If we get here, it means firmware reset is in progress
10712 * while we are trying to close. We can safely proceed with
10713 * the close because we are holding rtnl_lock(). Some firmware
10714 * messages may fail as we proceed to close. We set the
10715 * ABORT_ERR flag here so that the FW reset thread will later
10716 * abort when it gets the rtnl_lock() and sees the flag.
10717 */
10718 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10719 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10720 }
10721
86e953db
MC
10722#ifdef CONFIG_BNXT_SRIOV
10723 if (bp->sriov_cfg) {
10724 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10725 !bp->sriov_cfg,
10726 BNXT_SRIOV_CFG_WAIT_TMO);
10727 if (rc)
10728 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10729 }
10730#endif
10731 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10732 return rc;
10733}
10734
10735static int bnxt_close(struct net_device *dev)
10736{
10737 struct bnxt *bp = netdev_priv(dev);
10738
cde49a42 10739 bnxt_hwmon_close(bp);
c0c050c5 10740 bnxt_close_nic(bp, true, true);
33f7d55f 10741 bnxt_hwrm_shutdown_link(bp);
25e1acd6 10742 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
10743 return 0;
10744}
10745
0ca12be9
VV
10746static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10747 u16 *val)
10748{
bbf33d1d
EP
10749 struct hwrm_port_phy_mdio_read_output *resp;
10750 struct hwrm_port_phy_mdio_read_input *req;
0ca12be9
VV
10751 int rc;
10752
10753 if (bp->hwrm_spec_code < 0x10a00)
10754 return -EOPNOTSUPP;
10755
bbf33d1d
EP
10756 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10757 if (rc)
10758 return rc;
10759
10760 req->port_id = cpu_to_le16(bp->pf.port_id);
10761 req->phy_addr = phy_addr;
10762 req->reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10763 if (mdio_phy_id_is_c45(phy_addr)) {
bbf33d1d
EP
10764 req->cl45_mdio = 1;
10765 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10766 req->dev_addr = mdio_phy_id_devad(phy_addr);
10767 req->reg_addr = cpu_to_le16(reg);
0ca12be9
VV
10768 }
10769
bbf33d1d
EP
10770 resp = hwrm_req_hold(bp, req);
10771 rc = hwrm_req_send(bp, req);
0ca12be9
VV
10772 if (!rc)
10773 *val = le16_to_cpu(resp->reg_data);
bbf33d1d 10774 hwrm_req_drop(bp, req);
0ca12be9
VV
10775 return rc;
10776}
10777
10778static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10779 u16 val)
10780{
bbf33d1d
EP
10781 struct hwrm_port_phy_mdio_write_input *req;
10782 int rc;
0ca12be9
VV
10783
10784 if (bp->hwrm_spec_code < 0x10a00)
10785 return -EOPNOTSUPP;
10786
bbf33d1d
EP
10787 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10788 if (rc)
10789 return rc;
10790
10791 req->port_id = cpu_to_le16(bp->pf.port_id);
10792 req->phy_addr = phy_addr;
10793 req->reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10794 if (mdio_phy_id_is_c45(phy_addr)) {
bbf33d1d
EP
10795 req->cl45_mdio = 1;
10796 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10797 req->dev_addr = mdio_phy_id_devad(phy_addr);
10798 req->reg_addr = cpu_to_le16(reg);
0ca12be9 10799 }
bbf33d1d 10800 req->reg_data = cpu_to_le16(val);
0ca12be9 10801
bbf33d1d 10802 return hwrm_req_send(bp, req);
0ca12be9
VV
10803}
10804
c0c050c5
MC
10805/* rtnl_lock held */
10806static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10807{
0ca12be9
VV
10808 struct mii_ioctl_data *mdio = if_mii(ifr);
10809 struct bnxt *bp = netdev_priv(dev);
10810 int rc;
10811
c0c050c5
MC
10812 switch (cmd) {
10813 case SIOCGMIIPHY:
0ca12be9
VV
10814 mdio->phy_id = bp->link_info.phy_addr;
10815
df561f66 10816 fallthrough;
c0c050c5 10817 case SIOCGMIIREG: {
0ca12be9
VV
10818 u16 mii_regval = 0;
10819
c0c050c5
MC
10820 if (!netif_running(dev))
10821 return -EAGAIN;
10822
0ca12be9
VV
10823 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10824 &mii_regval);
10825 mdio->val_out = mii_regval;
10826 return rc;
c0c050c5
MC
10827 }
10828
10829 case SIOCSMIIREG:
10830 if (!netif_running(dev))
10831 return -EAGAIN;
10832
0ca12be9
VV
10833 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10834 mdio->val_in);
c0c050c5 10835
118612d5
MC
10836 case SIOCSHWTSTAMP:
10837 return bnxt_hwtstamp_set(dev, ifr);
10838
10839 case SIOCGHWTSTAMP:
10840 return bnxt_hwtstamp_get(dev, ifr);
10841
c0c050c5
MC
10842 default:
10843 /* do nothing */
10844 break;
10845 }
10846 return -EOPNOTSUPP;
10847}
10848
b8875ca3
MC
10849static void bnxt_get_ring_stats(struct bnxt *bp,
10850 struct rtnl_link_stats64 *stats)
c0c050c5 10851{
b8875ca3 10852 int i;
c0c050c5 10853
c0c050c5
MC
10854 for (i = 0; i < bp->cp_nr_rings; i++) {
10855 struct bnxt_napi *bnapi = bp->bnapi[i];
10856 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
a0c30621 10857 u64 *sw = cpr->stats.sw_stats;
c0c050c5 10858
a0c30621
MC
10859 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10860 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10861 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
c0c050c5 10862
a0c30621
MC
10863 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10864 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10865 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
c0c050c5 10866
a0c30621
MC
10867 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10868 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10869 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
c0c050c5 10870
a0c30621
MC
10871 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10872 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10873 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
c0c050c5
MC
10874
10875 stats->rx_missed_errors +=
a0c30621 10876 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
c0c050c5 10877
a0c30621 10878 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
c0c050c5 10879
a0c30621 10880 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
40bedf7c 10881
907fd4a2
JK
10882 stats->rx_dropped +=
10883 cpr->sw_stats.rx.rx_netpoll_discards +
10884 cpr->sw_stats.rx.rx_oom_discards;
c0c050c5 10885 }
b8875ca3
MC
10886}
10887
10888static void bnxt_add_prev_stats(struct bnxt *bp,
10889 struct rtnl_link_stats64 *stats)
10890{
10891 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10892
10893 stats->rx_packets += prev_stats->rx_packets;
10894 stats->tx_packets += prev_stats->tx_packets;
10895 stats->rx_bytes += prev_stats->rx_bytes;
10896 stats->tx_bytes += prev_stats->tx_bytes;
10897 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10898 stats->multicast += prev_stats->multicast;
40bedf7c 10899 stats->rx_dropped += prev_stats->rx_dropped;
b8875ca3
MC
10900 stats->tx_dropped += prev_stats->tx_dropped;
10901}
10902
10903static void
10904bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10905{
10906 struct bnxt *bp = netdev_priv(dev);
10907
10908 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10909 /* Make sure bnxt_close_nic() sees that we are reading stats before
10910 * we check the BNXT_STATE_OPEN flag.
10911 */
10912 smp_mb__after_atomic();
10913 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10914 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10915 *stats = bp->net_stats_prev;
10916 return;
10917 }
10918
10919 bnxt_get_ring_stats(bp, stats);
10920 bnxt_add_prev_stats(bp, stats);
c0c050c5 10921
9947f83f 10922 if (bp->flags & BNXT_FLAG_PORT_STATS) {
a0c30621
MC
10923 u64 *rx = bp->port_stats.sw_stats;
10924 u64 *tx = bp->port_stats.sw_stats +
10925 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10926
10927 stats->rx_crc_errors =
10928 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10929 stats->rx_frame_errors =
10930 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10931 stats->rx_length_errors =
10932 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10933 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10934 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10935 stats->rx_errors =
10936 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10937 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10938 stats->collisions =
10939 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10940 stats->tx_fifo_errors =
10941 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10942 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
9947f83f 10943 }
f9b76ebd 10944 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
10945}
10946
4c70dbe3
MC
10947static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
10948 struct bnxt_total_ring_err_stats *stats,
10949 struct bnxt_cp_ring_info *cpr)
10950{
10951 struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
10952 u64 *hw_stats = cpr->stats.sw_stats;
10953
10954 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
10955 stats->rx_total_resets += sw_stats->rx.rx_resets;
10956 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
10957 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
10958 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
10959 stats->rx_total_ring_discards +=
10960 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
8becd196 10961 stats->tx_total_resets += sw_stats->tx.tx_resets;
4c70dbe3
MC
10962 stats->tx_total_ring_discards +=
10963 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
10964 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
10965}
10966
10967void bnxt_get_ring_err_stats(struct bnxt *bp,
10968 struct bnxt_total_ring_err_stats *stats)
10969{
10970 int i;
10971
10972 for (i = 0; i < bp->cp_nr_rings; i++)
10973 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
10974}
10975
c0c050c5
MC
10976static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10977{
10978 struct net_device *dev = bp->dev;
10979 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10980 struct netdev_hw_addr *ha;
10981 u8 *haddr;
10982 int mc_count = 0;
10983 bool update = false;
10984 int off = 0;
10985
10986 netdev_for_each_mc_addr(ha, dev) {
10987 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10988 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10989 vnic->mc_list_count = 0;
10990 return false;
10991 }
10992 haddr = ha->addr;
10993 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10994 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10995 update = true;
10996 }
10997 off += ETH_ALEN;
10998 mc_count++;
10999 }
11000 if (mc_count)
11001 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
11002
11003 if (mc_count != vnic->mc_list_count) {
11004 vnic->mc_list_count = mc_count;
11005 update = true;
11006 }
11007 return update;
11008}
11009
11010static bool bnxt_uc_list_updated(struct bnxt *bp)
11011{
11012 struct net_device *dev = bp->dev;
11013 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11014 struct netdev_hw_addr *ha;
11015 int off = 0;
11016
11017 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
11018 return true;
11019
11020 netdev_for_each_uc_addr(ha, dev) {
11021 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
11022 return true;
11023
11024 off += ETH_ALEN;
11025 }
11026 return false;
11027}
11028
11029static void bnxt_set_rx_mode(struct net_device *dev)
11030{
11031 struct bnxt *bp = netdev_priv(dev);
268d0895 11032 struct bnxt_vnic_info *vnic;
c0c050c5
MC
11033 bool mc_update = false;
11034 bool uc_update;
268d0895 11035 u32 mask;
c0c050c5 11036
268d0895 11037 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
11038 return;
11039
268d0895
MC
11040 vnic = &bp->vnic_info[0];
11041 mask = vnic->rx_mask;
c0c050c5
MC
11042 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
11043 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
11044 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
11045 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 11046
dd85fc0a 11047 if (dev->flags & IFF_PROMISC)
c0c050c5
MC
11048 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11049
11050 uc_update = bnxt_uc_list_updated(bp);
11051
30e33848
MC
11052 if (dev->flags & IFF_BROADCAST)
11053 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
11054 if (dev->flags & IFF_ALLMULTI) {
11055 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11056 vnic->mc_list_count = 0;
8cdb1592 11057 } else if (dev->flags & IFF_MULTICAST) {
c0c050c5
MC
11058 mc_update = bnxt_mc_list_updated(bp, &mask);
11059 }
11060
11061 if (mask != vnic->rx_mask || uc_update || mc_update) {
11062 vnic->rx_mask = mask;
11063
9b1a00fd 11064 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
c0c050c5
MC
11065 }
11066}
11067
b664f008 11068static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
11069{
11070 struct net_device *dev = bp->dev;
11071 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
bbf33d1d 11072 struct hwrm_cfa_l2_filter_free_input *req;
c0c050c5
MC
11073 struct netdev_hw_addr *ha;
11074 int i, off = 0, rc;
11075 bool uc_update;
11076
11077 netif_addr_lock_bh(dev);
11078 uc_update = bnxt_uc_list_updated(bp);
11079 netif_addr_unlock_bh(dev);
11080
11081 if (!uc_update)
11082 goto skip_uc;
11083
bbf33d1d
EP
11084 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
11085 if (rc)
11086 return rc;
11087 hwrm_req_hold(bp, req);
c0c050c5 11088 for (i = 1; i < vnic->uc_filter_count; i++) {
bbf33d1d 11089 req->l2_filter_id = vnic->fw_l2_filter_id[i];
c0c050c5 11090
bbf33d1d 11091 rc = hwrm_req_send(bp, req);
c0c050c5 11092 }
bbf33d1d 11093 hwrm_req_drop(bp, req);
c0c050c5
MC
11094
11095 vnic->uc_filter_count = 1;
11096
11097 netif_addr_lock_bh(dev);
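	/* If there are more unicast addresses than available L2 filters,
	 * fall back to promiscuous mode instead of programming the list.
	 */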
11098 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
11099 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11100 } else {
11101 netdev_for_each_uc_addr(ha, dev) {
11102 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
11103 off += ETH_ALEN;
11104 vnic->uc_filter_count++;
11105 }
11106 }
11107 netif_addr_unlock_bh(dev);
11108
11109 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
11110 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11111 if (rc) {
662c9b22
EP
11112 if (BNXT_VF(bp) && rc == -ENODEV) {
11113 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11114 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
11115 else
11116 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
11117 rc = 0;
11118 } else {
11119 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11120 }
c0c050c5 11121 vnic->uc_filter_count = i;
b664f008 11122 return rc;
c0c050c5
MC
11123 }
11124 }
662c9b22
EP
11125 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11126 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
c0c050c5
MC
11127
11128skip_uc:
dd85fc0a
EP
11129 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
11130 !bnxt_promisc_ok(bp))
11131 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
c0c050c5 11132 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8cdb1592 11133 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
b4e30e8e
MC
11134 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
11135 rc);
8cdb1592 11136 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
b4e30e8e
MC
11137 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11138 vnic->mc_list_count = 0;
11139 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11140 }
c0c050c5 11141 if (rc)
b4e30e8e 11142 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 11143 rc);
b664f008
MC
11144
11145 return rc;
c0c050c5
MC
11146}
11147
2773dfb2
MC
11148static bool bnxt_can_reserve_rings(struct bnxt *bp)
11149{
11150#ifdef CONFIG_BNXT_SRIOV
f1ca94de 11151 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
11152 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11153
11154 /* No minimum rings were provisioned by the PF. Don't
11155 * reserve rings by default when device is down.
11156 */
11157 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
11158 return true;
11159
11160 if (!netif_running(bp->dev))
11161 return false;
11162 }
11163#endif
11164 return true;
11165}
11166
8079e8f1
MC
11167/* If the chip and firmware support RFS */
11168static bool bnxt_rfs_supported(struct bnxt *bp)
11169{
e969ae5b 11170 if (bp->flags & BNXT_FLAG_CHIP_P5) {
41136ab3 11171 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
e969ae5b 11172 return true;
41e8d798 11173 return false;
e969ae5b 11174 }
976e52b7
MC
11175 /* 212 firmware is broken for aRFS */
11176 if (BNXT_FW_MAJ(bp) == 212)
11177 return false;
8079e8f1
MC
11178 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
11179 return true;
ae10ae74
MC
11180 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11181 return true;
8079e8f1
MC
11182 return false;
11183}
11184
11185/* If runtime conditions support RFS */
2bcfa6f6
MC
11186static bool bnxt_rfs_capable(struct bnxt *bp)
11187{
11188#ifdef CONFIG_RFS_ACCEL
8079e8f1 11189 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 11190
41e8d798 11191 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 11192 return bnxt_rfs_supported(bp);
13ba7943 11193 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
2bcfa6f6
MC
11194 return false;
11195
11196 vnics = 1 + bp->rx_nr_rings;
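	/* aRFS needs one VNIC per RX ring plus the default VNIC */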
8079e8f1
MC
11197 max_vnics = bnxt_get_max_func_vnics(bp);
11198 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
11199
11200 /* RSS contexts not a limiting factor */
11201 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11202 max_rss_ctxs = max_vnics;
8079e8f1 11203 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
11204 if (bp->rx_nr_rings > 1)
11205 netdev_warn(bp->dev,
11206 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
11207 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 11208 return false;
a2304909 11209 }
2bcfa6f6 11210
f1ca94de 11211 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
11212 return true;
11213
11214 if (vnics == bp->hw_resc.resv_vnics)
11215 return true;
11216
780baad4 11217 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
11218 if (vnics <= bp->hw_resc.resv_vnics)
11219 return true;
11220
11221 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 11222 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 11223 return false;
2bcfa6f6
MC
11224#else
11225 return false;
11226#endif
11227}
11228
c0c050c5
MC
11229static netdev_features_t bnxt_fix_features(struct net_device *dev,
11230 netdev_features_t features)
11231{
2bcfa6f6 11232 struct bnxt *bp = netdev_priv(dev);
c72cb303 11233 netdev_features_t vlan_features;
2bcfa6f6 11234
a2304909 11235 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 11236 features &= ~NETIF_F_NTUPLE;
5a9f6b23 11237
366c3047 11238 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
1dc4c557
AG
11239 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11240
1054aee8
MC
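	/* Hardware GRO requires GRO to be enabled; hardware GRO and LRO are
	 * mutually exclusive.
	 */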
11241 if (!(features & NETIF_F_GRO))
11242 features &= ~NETIF_F_GRO_HW;
11243
11244 if (features & NETIF_F_GRO_HW)
11245 features &= ~NETIF_F_LRO;
11246
5a9f6b23
MC
11247 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
11248 * turned on or off together.
11249 */
a196e96b
EP
11250 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
11251 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
11252 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11253 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
c72cb303 11254 else if (vlan_features)
a196e96b 11255 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
5a9f6b23 11256 }
cf6645f8 11257#ifdef CONFIG_BNXT_SRIOV
a196e96b
EP
11258 if (BNXT_VF(bp) && bp->vf.vlan)
11259 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
cf6645f8 11260#endif
c0c050c5
MC
11261 return features;
11262}
11263
11264static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
11265{
11266 struct bnxt *bp = netdev_priv(dev);
11267 u32 flags = bp->flags;
11268 u32 changes;
11269 int rc = 0;
11270 bool re_init = false;
11271 bool update_tpa = false;
11272
11273 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 11274 if (features & NETIF_F_GRO_HW)
c0c050c5 11275 flags |= BNXT_FLAG_GRO;
1054aee8 11276 else if (features & NETIF_F_LRO)
c0c050c5
MC
11277 flags |= BNXT_FLAG_LRO;
11278
bdbd1eb5
MC
11279 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11280 flags &= ~BNXT_FLAG_TPA;
11281
a196e96b 11282 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
11283 flags |= BNXT_FLAG_STRIP_VLAN;
11284
11285 if (features & NETIF_F_NTUPLE)
11286 flags |= BNXT_FLAG_RFS;
11287
11288 changes = flags ^ bp->flags;
11289 if (changes & BNXT_FLAG_TPA) {
11290 update_tpa = true;
11291 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
11292 (flags & BNXT_FLAG_TPA) == 0 ||
11293 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
11294 re_init = true;
11295 }
11296
11297 if (changes & ~BNXT_FLAG_TPA)
11298 re_init = true;
11299
11300 if (flags != bp->flags) {
11301 u32 old_flags = bp->flags;
11302
2bcfa6f6 11303 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 11304 bp->flags = flags;
c0c050c5
MC
11305 if (update_tpa)
11306 bnxt_set_ring_params(bp);
11307 return rc;
11308 }
11309
11310 if (re_init) {
11311 bnxt_close_nic(bp, false, false);
f45b7b78 11312 bp->flags = flags;
c0c050c5
MC
11313 if (update_tpa)
11314 bnxt_set_ring_params(bp);
11315
11316 return bnxt_open_nic(bp, false, false);
11317 }
11318 if (update_tpa) {
f45b7b78 11319 bp->flags = flags;
c0c050c5
MC
11320 rc = bnxt_set_tpa(bp,
11321 (flags & BNXT_FLAG_TPA) ?
11322 true : false);
11323 if (rc)
11324 bp->flags = old_flags;
11325 }
11326 }
11327 return rc;
11328}
11329
aa473d6c
MC
11330static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11331 u8 **nextp)
11332{
11333 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
b6488b16 11334 struct hop_jumbo_hdr *jhdr;
aa473d6c
MC
11335 int hdr_count = 0;
11336 u8 *nexthdr;
11337 int start;
11338
11339 /* Check that there are at most 2 IPv6 extension headers, no
11340 * fragment header, and each is <= 64 bytes.
11341 */
11342 start = nw_off + sizeof(*ip6h);
11343 nexthdr = &ip6h->nexthdr;
11344 while (ipv6_ext_hdr(*nexthdr)) {
11345 struct ipv6_opt_hdr *hp;
11346 int hdrlen;
11347
11348 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11349 *nexthdr == NEXTHDR_FRAGMENT)
11350 return false;
11351 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11352 skb_headlen(skb), NULL);
11353 if (!hp)
11354 return false;
11355 if (*nexthdr == NEXTHDR_AUTH)
11356 hdrlen = ipv6_authlen(hp);
11357 else
11358 hdrlen = ipv6_optlen(hp);
11359
11360 if (hdrlen > 64)
11361 return false;
b6488b16
CL
11362
11363 /* The ext header may be a hop-by-hop header inserted for
11364 * big TCP purposes. This will be removed before sending
11365 * from NIC, so do not count it.
11366 */
11367 if (*nexthdr == NEXTHDR_HOP) {
11368 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
11369 goto increment_hdr;
11370
11371 jhdr = (struct hop_jumbo_hdr *)hp;
11372 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
11373 jhdr->nexthdr != IPPROTO_TCP)
11374 goto increment_hdr;
11375
11376 goto next_hdr;
11377 }
11378increment_hdr:
11379 hdr_count++;
11380next_hdr:
aa473d6c
MC
11381 nexthdr = &hp->nexthdr;
11382 start += hdrlen;
aa473d6c
MC
11383 }
11384 if (nextp) {
11385 /* Caller will check inner protocol */
11386 if (skb->encapsulation) {
11387 *nextp = nexthdr;
11388 return true;
11389 }
11390 *nextp = NULL;
11391 }
11392 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11393 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11394}
11395
11396/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
11397static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11398{
11399 struct udphdr *uh = udp_hdr(skb);
11400 __be16 udp_port = uh->dest;
11401
11402 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11403 return false;
11404 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11405 struct ethhdr *eh = inner_eth_hdr(skb);
11406
11407 switch (eh->h_proto) {
11408 case htons(ETH_P_IP):
11409 return true;
11410 case htons(ETH_P_IPV6):
11411 return bnxt_exthdr_check(bp, skb,
11412 skb_inner_network_offset(skb),
11413 NULL);
11414 }
11415 }
11416 return false;
11417}
11418
11419static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11420{
11421 switch (l4_proto) {
11422 case IPPROTO_UDP:
11423 return bnxt_udp_tunl_check(bp, skb);
11424 case IPPROTO_IPIP:
11425 return true;
11426 case IPPROTO_GRE: {
11427 switch (skb->inner_protocol) {
11428 default:
11429 return false;
11430 case htons(ETH_P_IP):
11431 return true;
11432 case htons(ETH_P_IPV6):
11433 fallthrough;
11434 }
11435 }
11436 case IPPROTO_IPV6:
11437 /* Check ext headers of inner ipv6 */
11438 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11439 NULL);
11440 }
11441 return false;
11442}
11443
1698d600
MC
11444static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11445 struct net_device *dev,
11446 netdev_features_t features)
11447{
aa473d6c
MC
11448 struct bnxt *bp = netdev_priv(dev);
11449 u8 *l4_proto;
1698d600
MC
11450
11451 features = vlan_features_check(skb, features);
1698d600
MC
11452 switch (vlan_get_protocol(skb)) {
11453 case htons(ETH_P_IP):
aa473d6c
MC
11454 if (!skb->encapsulation)
11455 return features;
11456 l4_proto = &ip_hdr(skb)->protocol;
11457 if (bnxt_tunl_check(bp, skb, *l4_proto))
11458 return features;
1698d600
MC
11459 break;
11460 case htons(ETH_P_IPV6):
aa473d6c
MC
11461 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11462 &l4_proto))
11463 break;
11464 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11465 return features;
1698d600 11466 break;
1698d600 11467 }
1698d600
MC
11468 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11469}
11470
b5d600b0
VV
11471int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11472 u32 *reg_buf)
11473{
bbf33d1d
EP
11474 struct hwrm_dbg_read_direct_output *resp;
11475 struct hwrm_dbg_read_direct_input *req;
b5d600b0
VV
11476 __le32 *dbg_reg_buf;
11477 dma_addr_t mapping;
11478 int rc, i;
11479
bbf33d1d
EP
11480 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11481 if (rc)
11482 return rc;
11483
11484 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11485 &mapping);
11486 if (!dbg_reg_buf) {
11487 rc = -ENOMEM;
11488 goto dbg_rd_reg_exit;
11489 }
11490
11491 req->host_dest_addr = cpu_to_le64(mapping);
11492
11493 resp = hwrm_req_hold(bp, req);
11494 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11495 req->read_len32 = cpu_to_le32(num_words);
11496
11497 rc = hwrm_req_send(bp, req);
b5d600b0
VV
11498 if (rc || resp->error_code) {
11499 rc = -EIO;
11500 goto dbg_rd_reg_exit;
11501 }
11502 for (i = 0; i < num_words; i++)
11503 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11504
11505dbg_rd_reg_exit:
bbf33d1d 11506 hwrm_req_drop(bp, req);
b5d600b0
VV
11507 return rc;
11508}
11509
ffd77621
MC
11510static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11511 u32 ring_id, u32 *prod, u32 *cons)
11512{
bbf33d1d
EP
11513 struct hwrm_dbg_ring_info_get_output *resp;
11514 struct hwrm_dbg_ring_info_get_input *req;
ffd77621
MC
11515 int rc;
11516
bbf33d1d
EP
11517 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11518 if (rc)
11519 return rc;
11520
11521 req->ring_type = ring_type;
11522 req->fw_ring_id = cpu_to_le32(ring_id);
11523 resp = hwrm_req_hold(bp, req);
11524 rc = hwrm_req_send(bp, req);
ffd77621
MC
11525 if (!rc) {
11526 *prod = le32_to_cpu(resp->producer_index);
11527 *cons = le32_to_cpu(resp->consumer_index);
11528 }
bbf33d1d 11529 hwrm_req_drop(bp, req);
ffd77621
MC
11530 return rc;
11531}
11532
9f554590
MC
11533static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11534{
b6ab4b01 11535 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
11536 int i = bnapi->index;
11537
3b2b7d9d
MC
11538 if (!txr)
11539 return;
11540
9f554590
MC
11541 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11542 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11543 txr->tx_cons);
11544}
11545
11546static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11547{
b6ab4b01 11548 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
11549 int i = bnapi->index;
11550
3b2b7d9d
MC
11551 if (!rxr)
11552 return;
11553
9f554590
MC
11554 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11555 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11556 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11557 rxr->rx_sw_agg_prod);
11558}
11559
11560static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11561{
11562 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11563 int i = bnapi->index;
11564
11565 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11566 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11567}
11568
c0c050c5
MC
11569static void bnxt_dbg_dump_states(struct bnxt *bp)
11570{
11571 int i;
11572 struct bnxt_napi *bnapi;
c0c050c5
MC
11573
11574 for (i = 0; i < bp->cp_nr_rings; i++) {
11575 bnapi = bp->bnapi[i];
c0c050c5 11576 if (netif_msg_drv(bp)) {
9f554590
MC
11577 bnxt_dump_tx_sw_state(bnapi);
11578 bnxt_dump_rx_sw_state(bnapi);
11579 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
11580 }
11581 }
11582}
11583
8fbf58e1
MC
11584static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11585{
11586 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
bbf33d1d 11587 struct hwrm_ring_reset_input *req;
8fbf58e1
MC
11588 struct bnxt_napi *bnapi = rxr->bnapi;
11589 struct bnxt_cp_ring_info *cpr;
11590 u16 cp_ring_id;
bbf33d1d
EP
11591 int rc;
11592
11593 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11594 if (rc)
11595 return rc;
8fbf58e1
MC
11596
11597 cpr = &bnapi->cp_ring;
11598 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
bbf33d1d
EP
11599 req->cmpl_ring = cpu_to_le16(cp_ring_id);
11600 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11601 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11602 return hwrm_req_send_silent(bp, req);
8fbf58e1
MC
11603}
11604
6988bd92 11605static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 11606{
6988bd92
MC
11607 if (!silent)
11608 bnxt_dbg_dump_states(bp);
028de140 11609 if (netif_running(bp->dev)) {
b386cd36
MC
11610 int rc;
11611
aa46dfff
VV
11612 if (silent) {
11613 bnxt_close_nic(bp, false, false);
11614 bnxt_open_nic(bp, false, false);
11615 } else {
b386cd36 11616 bnxt_ulp_stop(bp);
aa46dfff
VV
11617 bnxt_close_nic(bp, true, false);
11618 rc = bnxt_open_nic(bp, true, false);
11619 bnxt_ulp_start(bp, rc);
11620 }
028de140 11621 }
c0c050c5
MC
11622}
11623
0290bd29 11624static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
c0c050c5
MC
11625{
11626 struct bnxt *bp = netdev_priv(dev);
11627
11628 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9b1a00fd 11629 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
c0c050c5
MC
11630}
11631
acfb50e4
VV
11632static void bnxt_fw_health_check(struct bnxt *bp)
11633{
11634 struct bnxt_fw_health *fw_health = bp->fw_health;
83474a9b 11635 struct pci_dev *pdev = bp->pdev;
acfb50e4
VV
11636 u32 val;
11637
0797c10d 11638 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
acfb50e4
VV
11639 return;
11640
1b2b9183
MC
11641 /* Make sure it is enabled before checking the tmr_counter. */
11642 smp_rmb();
acfb50e4
VV
11643 if (fw_health->tmr_counter) {
11644 fw_health->tmr_counter--;
11645 return;
11646 }
11647
11648 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
83474a9b 11649 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
8cc95ceb 11650 fw_health->arrests++;
acfb50e4 11651 goto fw_reset;
8cc95ceb 11652 }
acfb50e4
VV
11653
11654 fw_health->last_fw_heartbeat = val;
11655
11656 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
83474a9b 11657 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
8cc95ceb 11658 fw_health->discoveries++;
acfb50e4 11659 goto fw_reset;
8cc95ceb 11660 }
acfb50e4
VV
11661
11662 fw_health->tmr_counter = fw_health->tmr_multiplier;
11663 return;
11664
11665fw_reset:
9b1a00fd 11666 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
acfb50e4
VV
11667}
11668
e99e88a9 11669static void bnxt_timer(struct timer_list *t)
c0c050c5 11670{
e99e88a9 11671 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
11672 struct net_device *dev = bp->dev;
11673
e0009404 11674 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
11675 return;
11676
11677 if (atomic_read(&bp->intr_sem) != 0)
11678 goto bnxt_restart_timer;
11679
acfb50e4
VV
11680 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11681 bnxt_fw_health_check(bp);
11682
9b1a00fd
JK
11683 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
11684 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
5a84acbe 11685
9b1a00fd
JK
11686 if (bnxt_tc_flower_enabled(bp))
11687 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
a1ef4a79 11688
87d67f59 11689#ifdef CONFIG_RFS_ACCEL
9b1a00fd
JK
11690 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
11691 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
87d67f59
PC
11692#endif /*CONFIG_RFS_ACCEL*/
11693
a1ef4a79
MC
11694 if (bp->link_info.phy_retry) {
11695 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
acda6180 11696 bp->link_info.phy_retry = false;
a1ef4a79
MC
11697 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11698 } else {
9b1a00fd 11699 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
a1ef4a79
MC
11700 }
11701 }
ffd77621 11702
9b1a00fd
JK
11703 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11704 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
662c9b22 11705
5313845f 11706 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
9b1a00fd
JK
11707 netif_carrier_ok(dev))
11708 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
11709
c0c050c5
MC
11710bnxt_restart_timer:
11711 mod_timer(&bp->timer, jiffies + bp->current_interval);
11712}
11713
a551ee94 11714static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 11715{
a551ee94
MC
11716 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11717 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
11718 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11719 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11720 */
11721 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11722 rtnl_lock();
a551ee94
MC
11723}
11724
11725static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11726{
6988bd92
MC
11727 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11728 rtnl_unlock();
11729}
11730
a551ee94
MC
11731/* Only called from bnxt_sp_task() */
11732static void bnxt_reset(struct bnxt *bp, bool silent)
11733{
11734 bnxt_rtnl_lock_sp(bp);
11735 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11736 bnxt_reset_task(bp, silent);
11737 bnxt_rtnl_unlock_sp(bp);
11738}
11739
8fbf58e1
MC
11740/* Only called from bnxt_sp_task() */
11741static void bnxt_rx_ring_reset(struct bnxt *bp)
11742{
11743 int i;
11744
11745 bnxt_rtnl_lock_sp(bp);
11746 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11747 bnxt_rtnl_unlock_sp(bp);
11748 return;
11749 }
11750 /* Disable and flush TPA before resetting the RX ring */
11751 if (bp->flags & BNXT_FLAG_TPA)
11752 bnxt_set_tpa(bp, false);
11753 for (i = 0; i < bp->rx_nr_rings; i++) {
11754 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11755 struct bnxt_cp_ring_info *cpr;
11756 int rc;
11757
11758 if (!rxr->bnapi->in_reset)
11759 continue;
11760
11761 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11762 if (rc) {
11763 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11764 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11765 else
11766 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11767 rc);
8fb35cd3 11768 bnxt_reset_task(bp, true);
8fbf58e1
MC
11769 break;
11770 }
11771 bnxt_free_one_rx_ring_skbs(bp, i);
11772 rxr->rx_prod = 0;
11773 rxr->rx_agg_prod = 0;
11774 rxr->rx_sw_agg_prod = 0;
11775 rxr->rx_next_cons = 0;
11776 rxr->bnapi->in_reset = false;
11777 bnxt_alloc_one_rx_ring(bp, i);
11778 cpr = &rxr->bnapi->cp_ring;
8a27d4b9 11779 cpr->sw_stats.rx.rx_resets++;
8fbf58e1
MC
11780 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11781 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11782 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11783 }
11784 if (bp->flags & BNXT_FLAG_TPA)
11785 bnxt_set_tpa(bp, true);
11786 bnxt_rtnl_unlock_sp(bp);
11787}
11788
230d1f0d
MC
11789static void bnxt_fw_reset_close(struct bnxt *bp)
11790{
f3a6d206 11791 bnxt_ulp_stop(bp);
4f036b2e
MC
11792 /* When firmware is in fatal state, quiesce device and disable
11793 * bus master to prevent any potential bad DMAs before freeing
11794 * kernel memory.
d4073028 11795 */
4f036b2e 11796 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
dab62e7c
MC
11797 u16 val = 0;
11798
11799 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11800 if (val == 0xffff)
11801 bp->fw_reset_min_dsecs = 0;
4f036b2e
MC
11802 bnxt_tx_disable(bp);
11803 bnxt_disable_napi(bp);
11804 bnxt_disable_int_sync(bp);
11805 bnxt_free_irq(bp);
11806 bnxt_clear_int_mode(bp);
d4073028 11807 pci_disable_device(bp->pdev);
4f036b2e 11808 }
230d1f0d 11809 __bnxt_close_nic(bp, true, false);
ac797ced 11810 bnxt_vf_reps_free(bp);
230d1f0d
MC
11811 bnxt_clear_int_mode(bp);
11812 bnxt_hwrm_func_drv_unrgtr(bp);
d4073028
VV
11813 if (pci_is_enabled(bp->pdev))
11814 pci_disable_device(bp->pdev);
230d1f0d
MC
11815 bnxt_free_ctx_mem(bp);
11816 kfree(bp->ctx);
11817 bp->ctx = NULL;
11818}
11819
acfb50e4
VV
11820static bool is_bnxt_fw_ok(struct bnxt *bp)
11821{
11822 struct bnxt_fw_health *fw_health = bp->fw_health;
11823 bool no_heartbeat = false, has_reset = false;
11824 u32 val;
11825
11826 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11827 if (val == fw_health->last_fw_heartbeat)
11828 no_heartbeat = true;
11829
11830 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11831 if (val != fw_health->last_fw_reset_cnt)
11832 has_reset = true;
11833
11834 if (!no_heartbeat && has_reset)
11835 return true;
11836
11837 return false;
11838}
11839
d1db9e16
MC
11840/* rtnl_lock is acquired before calling this function */
11841static void bnxt_force_fw_reset(struct bnxt *bp)
11842{
11843 struct bnxt_fw_health *fw_health = bp->fw_health;
30e96f48 11844 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
d1db9e16
MC
11845 u32 wait_dsecs;
11846
11847 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11848 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11849 return;
11850
30e96f48
MC
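	/* Set BNXT_STATE_IN_FW_RESET under the PTP lock (when PTP is active)
	 * so that timestamping paths do not race with the reset.
	 */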
11851 if (ptp) {
11852 spin_lock_bh(&ptp->ptp_lock);
11853 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11854 spin_unlock_bh(&ptp->ptp_lock);
11855 } else {
11856 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11857 }
d1db9e16
MC
11858 bnxt_fw_reset_close(bp);
11859 wait_dsecs = fw_health->master_func_wait_dsecs;
1596847d 11860 if (fw_health->primary) {
d1db9e16
MC
11861 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11862 wait_dsecs = 0;
11863 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11864 } else {
11865 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11866 wait_dsecs = fw_health->normal_func_wait_dsecs;
11867 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11868 }
4037eb71
VV
11869
11870 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
d1db9e16
MC
11871 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11872 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11873}
11874
11875void bnxt_fw_exception(struct bnxt *bp)
11876{
a2b31e27 11877 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
d1db9e16
MC
11878 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11879 bnxt_rtnl_lock_sp(bp);
11880 bnxt_force_fw_reset(bp);
11881 bnxt_rtnl_unlock_sp(bp);
11882}
11883
e72cb7d6
MC
11884/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11885 * < 0 on error.
11886 */
11887static int bnxt_get_registered_vfs(struct bnxt *bp)
230d1f0d 11888{
e72cb7d6 11889#ifdef CONFIG_BNXT_SRIOV
230d1f0d
MC
11890 int rc;
11891
e72cb7d6
MC
11892 if (!BNXT_PF(bp))
11893 return 0;
11894
11895 rc = bnxt_hwrm_func_qcfg(bp);
11896 if (rc) {
11897 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11898 return rc;
11899 }
11900 if (bp->pf.registered_vfs)
11901 return bp->pf.registered_vfs;
11902 if (bp->sriov_cfg)
11903 return 1;
11904#endif
11905 return 0;
11906}
11907
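/* Initiate a firmware reset from the slow path.  Unless a fatal condition
 * is flagged, wait for any registered VFs to unload (POLL_VF state) before
 * closing the NIC and scheduling the reset state machine.
 */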
11908void bnxt_fw_reset(struct bnxt *bp)
11909{
230d1f0d
MC
11910 bnxt_rtnl_lock_sp(bp);
11911 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11912 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
30e96f48 11913 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4037eb71 11914 int n = 0, tmo;
e72cb7d6 11915
30e96f48
MC
11916 if (ptp) {
11917 spin_lock_bh(&ptp->ptp_lock);
11918 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11919 spin_unlock_bh(&ptp->ptp_lock);
11920 } else {
11921 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11922 }
e72cb7d6
MC
11923 if (bp->pf.active_vfs &&
11924 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11925 n = bnxt_get_registered_vfs(bp);
11926 if (n < 0) {
11927 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11928 n);
11929 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11930 dev_close(bp->dev);
11931 goto fw_reset_exit;
11932 } else if (n > 0) {
11933 u16 vf_tmo_dsecs = n * 10;
11934
11935 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11936 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11937 bp->fw_reset_state =
11938 BNXT_FW_RESET_STATE_POLL_VF;
11939 bnxt_queue_fw_reset_work(bp, HZ / 10);
11940 goto fw_reset_exit;
230d1f0d
MC
11941 }
11942 bnxt_fw_reset_close(bp);
4037eb71
VV
11943 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11944 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11945 tmo = HZ / 10;
11946 } else {
11947 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11948 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11949 }
11950 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d
MC
11951 }
11952fw_reset_exit:
11953 bnxt_rtnl_unlock_sp(bp);
11954}
11955
ffd77621
MC
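/* On P5 chips, look for completion rings that have work pending but whose
 * consumer index has not advanced since the last check; query the ring
 * state from firmware for debugging and account it as a missed IRQ.
 */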
11956static void bnxt_chk_missed_irq(struct bnxt *bp)
11957{
11958 int i;
11959
11960 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11961 return;
11962
11963 for (i = 0; i < bp->cp_nr_rings; i++) {
11964 struct bnxt_napi *bnapi = bp->bnapi[i];
11965 struct bnxt_cp_ring_info *cpr;
11966 u32 fw_ring_id;
11967 int j;
11968
11969 if (!bnapi)
11970 continue;
11971
11972 cpr = &bnapi->cp_ring;
11973 for (j = 0; j < 2; j++) {
11974 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11975 u32 val[2];
11976
11977 if (!cpr2 || cpr2->has_more_work ||
11978 !bnxt_has_work(bp, cpr2))
11979 continue;
11980
11981 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11982 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11983 continue;
11984 }
11985 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11986 bnxt_dbg_hwrm_ring_info_get(bp,
11987 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11988 fw_ring_id, &val[0], &val[1]);
9d8b5f05 11989 cpr->sw_stats.cmn.missed_irqs++;
ffd77621
MC
11990 }
11991 }
11992}
11993
c0c050c5
MC
11994static void bnxt_cfg_ntp_filters(struct bnxt *);
11995
8119e49b
MC
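/* Seed the requested link settings from the current firmware configuration:
 * autonegotiated speed and flow control when auto mode is enabled, otherwise
 * the forced speed (NRZ or PAM4 signalling) and forced pause settings.
 */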
11996static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11997{
11998 struct bnxt_link_info *link_info = &bp->link_info;
11999
12000 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
12001 link_info->autoneg = BNXT_AUTONEG_SPEED;
12002 if (bp->hwrm_spec_code >= 0x10201) {
12003 if (link_info->auto_pause_setting &
12004 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
12005 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12006 } else {
12007 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12008 }
12009 link_info->advertising = link_info->auto_link_speeds;
d058426e 12010 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
8119e49b
MC
12011 } else {
12012 link_info->req_link_speed = link_info->force_link_speed;
d058426e
EP
12013 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
12014 if (link_info->force_pam4_link_speed) {
12015 link_info->req_link_speed =
12016 link_info->force_pam4_link_speed;
12017 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
12018 }
8119e49b
MC
12019 link_info->req_duplex = link_info->duplex_setting;
12020 }
12021 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
12022 link_info->req_flow_ctrl =
12023 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
12024 else
12025 link_info->req_flow_ctrl = link_info->force_pause_setting;
12026}
12027
df97b34d
MC
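/* Answer a firmware echo request by sending HWRM_FUNC_ECHO_RESPONSE with
 * the event data saved from the async notification.
 */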
12028static void bnxt_fw_echo_reply(struct bnxt *bp)
12029{
12030 struct bnxt_fw_health *fw_health = bp->fw_health;
bbf33d1d
EP
12031 struct hwrm_func_echo_response_input *req;
12032 int rc;
df97b34d 12033
bbf33d1d
EP
12034 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
12035 if (rc)
12036 return;
12037 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
12038 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
12039 hwrm_req_send(bp, req);
df97b34d
MC
12040}
12041
c0c050c5
MC
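/* Slow-path workqueue handler: services the deferred events flagged in
 * bp->sp_event (RX mode, ntuple filters, link changes, statistics, resets)
 * while BNXT_STATE_IN_SP_TASK is set.
 */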
12042static void bnxt_sp_task(struct work_struct *work)
12043{
12044 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 12045
4cebdcec
MC
12046 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12047 smp_mb__after_atomic();
12048 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12049 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 12050 return;
4cebdcec 12051 }
c0c050c5
MC
12052
12053 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
12054 bnxt_cfg_rx_mode(bp);
12055
12056 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
12057 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
12058 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
12059 bnxt_hwrm_exec_fwd_req(bp);
00db3cba 12060 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
531d1d26
MC
12061 bnxt_hwrm_port_qstats(bp, 0);
12062 bnxt_hwrm_port_qstats_ext(bp, 0);
fea6b333 12063 bnxt_accumulate_all_stats(bp);
00db3cba 12064 }
3bdf56c4 12065
0eaa24b9 12066 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 12067 int rc;
0eaa24b9 12068
e2dc9b6e 12069 mutex_lock(&bp->link_lock);
0eaa24b9
MC
12070 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
12071 &bp->sp_event))
12072 bnxt_hwrm_phy_qcaps(bp);
12073
e2dc9b6e 12074 rc = bnxt_update_link(bp, true);
0eaa24b9
MC
12075 if (rc)
12076 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
12077 rc);
ca0c7538
VV
12078
12079 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
12080 &bp->sp_event))
12081 bnxt_init_ethtool_link_settings(bp);
12082 mutex_unlock(&bp->link_lock);
0eaa24b9 12083 }
a1ef4a79
MC
12084 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
12085 int rc;
12086
12087 mutex_lock(&bp->link_lock);
12088 rc = bnxt_update_phy_setting(bp);
12089 mutex_unlock(&bp->link_lock);
12090 if (rc) {
12091 netdev_warn(bp->dev, "update phy settings retry failed\n");
12092 } else {
12093 bp->link_info.phy_retry = false;
12094 netdev_info(bp->dev, "update phy settings retry succeeded\n");
12095 }
12096 }
90c694bb 12097 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
12098 mutex_lock(&bp->link_lock);
12099 bnxt_get_port_module_status(bp);
12100 mutex_unlock(&bp->link_lock);
90c694bb 12101 }
5a84acbe
SP
12102
12103 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
12104 bnxt_tc_flow_stats_work(bp);
12105
ffd77621
MC
12106 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
12107 bnxt_chk_missed_irq(bp);
12108
df97b34d
MC
12109 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
12110 bnxt_fw_echo_reply(bp);
12111
e2dc9b6e
MC
12112 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
12113 * must be the last functions to be called before exiting.
12114 */
6988bd92
MC
12115 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
12116 bnxt_reset(bp, false);
4cebdcec 12117
fc0f1929
MC
12118 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
12119 bnxt_reset(bp, true);
12120
8fbf58e1
MC
12121 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
12122 bnxt_rx_ring_reset(bp);
12123
aadb0b1a
EP
12124 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
12125 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
12126 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
12127 bnxt_devlink_health_fw_report(bp);
12128 else
12129 bnxt_fw_reset(bp);
12130 }
657a33c8 12131
acfb50e4
VV
12132 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
12133 if (!is_bnxt_fw_ok(bp))
aadb0b1a 12134 bnxt_devlink_health_fw_report(bp);
acfb50e4
VV
12135 }
12136
4cebdcec
MC
12137 smp_mb__before_atomic();
12138 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
12139}
12140
d1e7925e 12141/* Under rtnl_lock */
98fdbe73
MC
12142int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
12143 int tx_xdp)
d1e7925e
MC
12144{
12145 int max_rx, max_tx, tx_sets = 1;
780baad4 12146 int tx_rings_needed, stats;
8f23d638 12147 int rx_rings = rx;
6fc2ffdf 12148 int cp, vnics, rc;
d1e7925e 12149
d1e7925e
MC
12150 if (tcs)
12151 tx_sets = tcs;
12152
12153 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
12154 if (rc)
12155 return rc;
12156
12157 if (max_rx < rx)
12158 return -ENOMEM;
12159
5f449249 12160 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
12161 if (max_tx < tx_rings_needed)
12162 return -ENOMEM;
12163
6fc2ffdf 12164 vnics = 1;
9b3d15e6 12165 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
12166 vnics += rx_rings;
12167
8f23d638
MC
12168 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12169 rx_rings <<= 1;
12170 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
12171 stats = cp;
12172 if (BNXT_NEW_RM(bp)) {
11c3ec7b 12173 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
12174 stats += bnxt_get_ulp_stat_ctxs(bp);
12175 }
6fc2ffdf 12176 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 12177 stats, vnics);
d1e7925e
MC
12178}
12179
17086399
SP
12180static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
12181{
12182 if (bp->bar2) {
12183 pci_iounmap(pdev, bp->bar2);
12184 bp->bar2 = NULL;
12185 }
12186
12187 if (bp->bar1) {
12188 pci_iounmap(pdev, bp->bar1);
12189 bp->bar1 = NULL;
12190 }
12191
12192 if (bp->bar0) {
12193 pci_iounmap(pdev, bp->bar0);
12194 bp->bar0 = NULL;
12195 }
12196}
12197
12198static void bnxt_cleanup_pci(struct bnxt *bp)
12199{
12200 bnxt_unmap_bars(bp, bp->pdev);
12201 pci_release_regions(bp->pdev);
f6824308
VV
12202 if (pci_is_enabled(bp->pdev))
12203 pci_disable_device(bp->pdev);
17086399
SP
12204}
12205
18775aa8
MC
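/* Program default interrupt coalescing parameters for the RX and TX
 * completion rings, honoring the timer-reset capability reported by
 * firmware.
 */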
12206static void bnxt_init_dflt_coal(struct bnxt *bp)
12207{
df78ea22 12208 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
18775aa8 12209 struct bnxt_coal *coal;
df78ea22
MC
12210 u16 flags = 0;
12211
12212 if (coal_cap->cmpl_params &
12213 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
12214 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
18775aa8
MC
12215
 12216	/* Tick values in microseconds.
12217 * 1 coal_buf x bufs_per_record = 1 completion record.
12218 */
12219 coal = &bp->rx_coal;
0c2ff8d7 12220 coal->coal_ticks = 10;
18775aa8
MC
12221 coal->coal_bufs = 30;
12222 coal->coal_ticks_irq = 1;
12223 coal->coal_bufs_irq = 2;
05abe4dd 12224 coal->idle_thresh = 50;
18775aa8
MC
12225 coal->bufs_per_record = 2;
12226 coal->budget = 64; /* NAPI budget */
df78ea22 12227 coal->flags = flags;
18775aa8
MC
12228
12229 coal = &bp->tx_coal;
12230 coal->coal_ticks = 28;
12231 coal->coal_bufs = 30;
12232 coal->coal_ticks_irq = 2;
12233 coal->coal_bufs_irq = 2;
12234 coal->bufs_per_record = 1;
df78ea22 12235 coal->flags = flags;
18775aa8
MC
12236
12237 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12238}
12239
7c380918
MC
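/* Phase 1 of firmware init: query the firmware version (attempting
 * recovery if the query fails), read the NVM config version, reset the
 * function and sync the firmware clock.
 */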
12240static int bnxt_fw_init_one_p1(struct bnxt *bp)
12241{
12242 int rc;
12243
12244 bp->fw_cap = 0;
12245 rc = bnxt_hwrm_ver_get(bp);
ba02629f
EP
12246 bnxt_try_map_fw_health_reg(bp);
12247 if (rc) {
b187e4ba
EP
12248 rc = bnxt_try_recover_fw(bp);
12249 if (rc)
12250 return rc;
12251 rc = bnxt_hwrm_ver_get(bp);
87f7ab8d
EP
12252 if (rc)
12253 return rc;
ba02629f 12254 }
7c380918 12255
4933f675
VV
12256 bnxt_nvm_cfg_ver_get(bp);
12257
7c380918
MC
12258 rc = bnxt_hwrm_func_reset(bp);
12259 if (rc)
12260 return -ENODEV;
12261
12262 bnxt_hwrm_fw_set_time(bp);
12263 return 0;
12264}
12265
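/* Phase 2 of firmware init: query function, VNIC and flow capabilities,
 * set up error recovery if supported, register the driver with firmware,
 * and initialize ethtool, PTP and DCB state.
 */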
12266static int bnxt_fw_init_one_p2(struct bnxt *bp)
12267{
12268 int rc;
12269
12270 /* Get the MAX capabilities for this function */
12271 rc = bnxt_hwrm_func_qcaps(bp);
12272 if (rc) {
12273 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12274 rc);
12275 return -ENODEV;
12276 }
12277
12278 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12279 if (rc)
12280 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12281 rc);
12282
3e9ec2bb
EP
12283 if (bnxt_alloc_fw_health(bp)) {
12284 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12285 } else {
12286 rc = bnxt_hwrm_error_recovery_qcfg(bp);
12287 if (rc)
12288 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12289 rc);
12290 }
07f83d72 12291
2e882468 12292 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
7c380918
MC
12293 if (rc)
12294 return -ENODEV;
12295
12296 bnxt_hwrm_func_qcfg(bp);
12297 bnxt_hwrm_vnic_qcaps(bp);
12298 bnxt_hwrm_port_led_qcaps(bp);
12299 bnxt_ethtool_init(bp);
edc52873
PC
12300 if (bp->fw_cap & BNXT_FW_CAP_PTP)
12301 __bnxt_hwrm_ptp_qcfg(bp);
7c380918
MC
12302 bnxt_dcb_init(bp);
12303 return 0;
12304}
12305
ba642ab7
MC
12306static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12307{
12308 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12309 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12310 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
12311 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
12312 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
98a4322b
EP
12313 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
12314 bp->rss_hash_delta = bp->rss_hash_cfg;
c66c06c5 12315 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
ba642ab7
MC
12316 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12317 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12318 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
12319 }
12320}
12321
12322static void bnxt_set_dflt_rfs(struct bnxt *bp)
12323{
12324 struct net_device *dev = bp->dev;
12325
12326 dev->hw_features &= ~NETIF_F_NTUPLE;
12327 dev->features &= ~NETIF_F_NTUPLE;
12328 bp->flags &= ~BNXT_FLAG_RFS;
12329 if (bnxt_rfs_supported(bp)) {
12330 dev->hw_features |= NETIF_F_NTUPLE;
12331 if (bnxt_rfs_capable(bp)) {
12332 bp->flags |= BNXT_FLAG_RFS;
12333 dev->features |= NETIF_F_NTUPLE;
12334 }
12335 }
12336}
12337
12338static void bnxt_fw_init_one_p3(struct bnxt *bp)
12339{
12340 struct pci_dev *pdev = bp->pdev;
12341
12342 bnxt_set_dflt_rss_hash_type(bp);
12343 bnxt_set_dflt_rfs(bp);
12344
12345 bnxt_get_wol_settings(bp);
12346 if (bp->flags & BNXT_FLAG_WOL_CAP)
12347 device_set_wakeup_enable(&pdev->dev, bp->wol);
12348 else
12349 device_set_wakeup_capable(&pdev->dev, false);
12350
12351 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12352 bnxt_hwrm_coal_params_qcaps(bp);
12353}
12354
0afd6a4e
MC
12355static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12356
228ea8c1 12357int bnxt_fw_init_one(struct bnxt *bp)
ec5d31e3
MC
12358{
12359 int rc;
12360
12361 rc = bnxt_fw_init_one_p1(bp);
12362 if (rc) {
12363 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12364 return rc;
12365 }
12366 rc = bnxt_fw_init_one_p2(bp);
12367 if (rc) {
12368 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12369 return rc;
12370 }
0afd6a4e
MC
12371 rc = bnxt_probe_phy(bp, false);
12372 if (rc)
12373 return rc;
ec5d31e3
MC
12374 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12375 if (rc)
12376 return rc;
937f188c 12377
ec5d31e3
MC
12378 bnxt_fw_init_one_p3(bp);
12379 return 0;
12380}
12381
cbb51067
MC
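/* Execute one step of the firmware-supplied reset sequence: write the
 * configured value to a PCI config, GRC window or BAR register, then apply
 * the optional delay.
 */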
12382static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12383{
12384 struct bnxt_fw_health *fw_health = bp->fw_health;
12385 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12386 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12387 u32 reg_type, reg_off, delay_msecs;
12388
12389 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12390 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12391 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12392 switch (reg_type) {
12393 case BNXT_FW_HEALTH_REG_TYPE_CFG:
12394 pci_write_config_dword(bp->pdev, reg_off, val);
12395 break;
12396 case BNXT_FW_HEALTH_REG_TYPE_GRC:
12397 writel(reg_off & BNXT_GRC_BASE_MASK,
12398 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12399 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
df561f66 12400 fallthrough;
cbb51067
MC
12401 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12402 writel(val, bp->bar0 + reg_off);
12403 break;
12404 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12405 writel(val, bp->bar1 + reg_off);
12406 break;
12407 }
12408 if (delay_msecs) {
12409 pci_read_config_dword(bp->pdev, 0, &val);
12410 msleep(delay_msecs);
12411 }
12412}
12413
892a662f
EP
12414bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12415{
12416 struct hwrm_func_qcfg_output *resp;
12417 struct hwrm_func_qcfg_input *req;
12418 bool result = true; /* firmware will enforce if unknown */
12419
12420 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12421 return result;
12422
12423 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12424 return result;
12425
12426 req->fid = cpu_to_le16(0xffff);
12427 resp = hwrm_req_hold(bp, req);
12428 if (!hwrm_req_send(bp, req))
12429 result = !!(le16_to_cpu(resp->flags) &
12430 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
12431 hwrm_req_drop(bp, req);
12432 return result;
12433}
12434
cbb51067
MC
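/* Perform the actual firmware reset: via OP-TEE when error-recover-reload
 * is supported, by replaying the host register write sequence, or by
 * requesting HWRM_FW_RESET from the co-processor.
 */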
12435static void bnxt_reset_all(struct bnxt *bp)
12436{
12437 struct bnxt_fw_health *fw_health = bp->fw_health;
e07ab202
VV
12438 int i, rc;
12439
12440 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
87f7ab8d 12441 bnxt_fw_reset_via_optee(bp);
e07ab202 12442 bp->fw_reset_timestamp = jiffies;
e07ab202
VV
12443 return;
12444 }
cbb51067
MC
12445
12446 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12447 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12448 bnxt_fw_reset_writel(bp, i);
12449 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
bbf33d1d
EP
12450 struct hwrm_fw_reset_input *req;
12451
12452 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12453 if (!rc) {
12454 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12455 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12456 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12457 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12458 rc = hwrm_req_send(bp, req);
12459 }
a2f3835c 12460 if (rc != -ENODEV)
cbb51067
MC
12461 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12462 }
12463 bp->fw_reset_timestamp = jiffies;
12464}
12465
339eeb4b
MC
12466static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12467{
12468 return time_after(jiffies, bp->fw_reset_timestamp +
12469 (bp->fw_reset_max_dsecs * HZ / 10));
12470}
12471
3958b1da
SK
12472static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12473{
12474 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12475 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12476 bnxt_ulp_start(bp, rc);
aadb0b1a 12477 bnxt_dl_health_fw_status_update(bp, false);
3958b1da
SK
12478 }
12479 bp->fw_reset_state = 0;
12480 dev_close(bp->dev);
12481}
12482
230d1f0d
MC
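/* Delayed-work state machine that drives a firmware reset through the
 * POLL_VF, POLL_FW_DOWN, RESET_FW, ENABLE_DEV, POLL_FW and OPENING states,
 * re-opening the device and restarting ULPs on success.
 */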
12483static void bnxt_fw_reset_task(struct work_struct *work)
12484{
12485 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
3958b1da 12486 int rc = 0;
230d1f0d
MC
12487
12488 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12489 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12490 return;
12491 }
12492
12493 switch (bp->fw_reset_state) {
e72cb7d6
MC
12494 case BNXT_FW_RESET_STATE_POLL_VF: {
12495 int n = bnxt_get_registered_vfs(bp);
4037eb71 12496 int tmo;
e72cb7d6
MC
12497
12498 if (n < 0) {
230d1f0d 12499 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
e72cb7d6 12500 n, jiffies_to_msecs(jiffies -
230d1f0d
MC
12501 bp->fw_reset_timestamp));
12502 goto fw_reset_abort;
e72cb7d6 12503 } else if (n > 0) {
339eeb4b 12504 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d
MC
12505 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12506 bp->fw_reset_state = 0;
e72cb7d6
MC
12507 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12508 n);
230d1f0d
MC
12509 return;
12510 }
12511 bnxt_queue_fw_reset_work(bp, HZ / 10);
12512 return;
12513 }
12514 bp->fw_reset_timestamp = jiffies;
12515 rtnl_lock();
6cd657cb 12516 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
3958b1da 12517 bnxt_fw_reset_abort(bp, rc);
6cd657cb 12518 rtnl_unlock();
3958b1da 12519 return;
6cd657cb 12520 }
230d1f0d 12521 bnxt_fw_reset_close(bp);
4037eb71
VV
12522 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12523 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12524 tmo = HZ / 10;
12525 } else {
12526 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12527 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12528 }
230d1f0d 12529 rtnl_unlock();
4037eb71 12530 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d 12531 return;
e72cb7d6 12532 }
4037eb71
VV
12533 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12534 u32 val;
12535
12536 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12537 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
339eeb4b 12538 !bnxt_fw_reset_timeout(bp)) {
4037eb71
VV
12539 bnxt_queue_fw_reset_work(bp, HZ / 5);
12540 return;
12541 }
12542
1596847d 12543 if (!bp->fw_health->primary) {
4037eb71
VV
12544 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12545
12546 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12547 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12548 return;
12549 }
12550 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12551 }
df561f66 12552 fallthrough;
c6a9e7aa 12553 case BNXT_FW_RESET_STATE_RESET_FW:
cbb51067
MC
12554 bnxt_reset_all(bp);
12555 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
c6a9e7aa 12556 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
cbb51067 12557 return;
230d1f0d 12558 case BNXT_FW_RESET_STATE_ENABLE_DEV:
43a440c4 12559 bnxt_inv_fw_health_reg(bp);
bae8a003
VV
12560 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12561 !bp->fw_reset_min_dsecs) {
12562 u16 val;
12563
12564 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12565 if (val == 0xffff) {
12566 if (bnxt_fw_reset_timeout(bp)) {
12567 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
3958b1da 12568 rc = -ETIMEDOUT;
bae8a003 12569 goto fw_reset_abort;
dab62e7c 12570 }
bae8a003
VV
12571 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12572 return;
dab62e7c 12573 }
d1db9e16 12574 }
b4fff207 12575 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
aadb0b1a 12576 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
8f6c5e4d
EP
12577 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12578 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12579 bnxt_dl_remote_reload(bp);
230d1f0d
MC
12580 if (pci_enable_device(bp->pdev)) {
12581 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
3958b1da 12582 rc = -ENODEV;
230d1f0d
MC
12583 goto fw_reset_abort;
12584 }
12585 pci_set_master(bp->pdev);
12586 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
df561f66 12587 fallthrough;
230d1f0d
MC
12588 case BNXT_FW_RESET_STATE_POLL_FW:
12589 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
7b370ad7 12590 rc = bnxt_hwrm_poll(bp);
230d1f0d 12591 if (rc) {
339eeb4b 12592 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d 12593 netdev_err(bp->dev, "Firmware reset aborted\n");
fc8864e0 12594 goto fw_reset_abort_status;
230d1f0d
MC
12595 }
12596 bnxt_queue_fw_reset_work(bp, HZ / 5);
12597 return;
12598 }
12599 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12600 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
df561f66 12601 fallthrough;
230d1f0d
MC
12602 case BNXT_FW_RESET_STATE_OPENING:
12603 while (!rtnl_trylock()) {
12604 bnxt_queue_fw_reset_work(bp, HZ / 10);
12605 return;
12606 }
12607 rc = bnxt_open(bp->dev);
12608 if (rc) {
3958b1da
SK
12609 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12610 bnxt_fw_reset_abort(bp, rc);
12611 rtnl_unlock();
12612 return;
230d1f0d 12613 }
230d1f0d 12614
eca4cf12
MC
12615 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12616 bp->fw_health->enabled) {
12617 bp->fw_health->last_fw_reset_cnt =
12618 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12619 }
230d1f0d
MC
12620 bp->fw_reset_state = 0;
12621 /* Make sure fw_reset_state is 0 before clearing the flag */
12622 smp_mb__before_atomic();
12623 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
758684e4
SK
12624 bnxt_ulp_start(bp, 0);
12625 bnxt_reenable_sriov(bp);
ac797ced
SB
12626 bnxt_vf_reps_alloc(bp);
12627 bnxt_vf_reps_open(bp);
9e518f25 12628 bnxt_ptp_reapply_pps(bp);
8f6c5e4d 12629 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
aadb0b1a
EP
12630 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12631 bnxt_dl_health_fw_recovery_done(bp);
12632 bnxt_dl_health_fw_status_update(bp, true);
12633 }
f3a6d206 12634 rtnl_unlock();
230d1f0d
MC
12635 break;
12636 }
12637 return;
12638
fc8864e0
MC
12639fw_reset_abort_status:
12640 if (bp->fw_health->status_reliable ||
12641 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12642 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12643
12644 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12645 }
230d1f0d 12646fw_reset_abort:
230d1f0d 12647 rtnl_lock();
3958b1da 12648 bnxt_fw_reset_abort(bp, rc);
230d1f0d
MC
12649 rtnl_unlock();
12650}
12651
c0c050c5
MC
12652static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12653{
12654 int rc;
12655 struct bnxt *bp = netdev_priv(dev);
12656
12657 SET_NETDEV_DEV(dev, &pdev->dev);
12658
12659 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12660 rc = pci_enable_device(pdev);
12661 if (rc) {
12662 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12663 goto init_err;
12664 }
12665
12666 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12667 dev_err(&pdev->dev,
12668 "Cannot find PCI device base address, aborting\n");
12669 rc = -ENODEV;
12670 goto init_err_disable;
12671 }
12672
12673 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12674 if (rc) {
12675 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12676 goto init_err_disable;
12677 }
12678
12679 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12680 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12681 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
3383176e 12682 rc = -EIO;
c54bc3ce 12683 goto init_err_release;
c0c050c5
MC
12684 }
12685
12686 pci_set_master(pdev);
12687
12688 bp->dev = dev;
12689 bp->pdev = pdev;
12690
8ae24738
MC
12691 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12692 * determines the BAR size.
12693 */
c0c050c5
MC
12694 bp->bar0 = pci_ioremap_bar(pdev, 0);
12695 if (!bp->bar0) {
12696 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12697 rc = -ENOMEM;
12698 goto init_err_release;
12699 }
12700
c0c050c5
MC
12701 bp->bar2 = pci_ioremap_bar(pdev, 4);
12702 if (!bp->bar2) {
12703 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12704 rc = -ENOMEM;
12705 goto init_err_release;
12706 }
12707
12708 INIT_WORK(&bp->sp_task, bnxt_sp_task);
230d1f0d 12709 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
c0c050c5
MC
12710
12711 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
12712#if BITS_PER_LONG == 32
12713 spin_lock_init(&bp->db_lock);
12714#endif
c0c050c5
MC
12715
12716 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12717 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12718
e99e88a9 12719 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
12720 bp->current_interval = BNXT_TIMER_INTERVAL;
12721
442a35a5
JK
12722 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12723 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12724
caefe526 12725 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
12726 return 0;
12727
12728init_err_release:
17086399 12729 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
12730 pci_release_regions(pdev);
12731
12732init_err_disable:
12733 pci_disable_device(pdev);
12734
12735init_err:
12736 return rc;
12737}
12738
12739/* rtnl_lock held */
12740static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12741{
12742 struct sockaddr *addr = p;
1fc2cfd0
JH
12743 struct bnxt *bp = netdev_priv(dev);
12744 int rc = 0;
c0c050c5
MC
12745
12746 if (!is_valid_ether_addr(addr->sa_data))
12747 return -EADDRNOTAVAIL;
12748
c1a7bdff
MC
12749 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12750 return 0;
12751
28ea334b 12752 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
12753 if (rc)
12754 return rc;
bdd4347b 12755
a05e4c0a 12756 eth_hw_addr_set(dev, addr->sa_data);
1fc2cfd0
JH
12757 if (netif_running(dev)) {
12758 bnxt_close_nic(bp, false, false);
12759 rc = bnxt_open_nic(bp, false, false);
12760 }
c0c050c5 12761
1fc2cfd0 12762 return rc;
c0c050c5
MC
12763}
12764
12765/* rtnl_lock held */
12766static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12767{
12768 struct bnxt *bp = netdev_priv(dev);
12769
c0c050c5 12770 if (netif_running(dev))
a9b952d2 12771 bnxt_close_nic(bp, true, false);
c0c050c5
MC
12772
12773 dev->mtu = new_mtu;
12774 bnxt_set_ring_params(bp);
12775
12776 if (netif_running(dev))
a9b952d2 12777 return bnxt_open_nic(bp, true, false);
c0c050c5
MC
12778
12779 return 0;
12780}
12781
c5e3deb8 12782int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
12783{
12784 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 12785 bool sh = false;
d1e7925e 12786 int rc;
16e5cc64 12787
c0c050c5 12788 if (tc > bp->max_tc) {
b451c8b6 12789 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
12790 tc, bp->max_tc);
12791 return -EINVAL;
12792 }
12793
12794 if (netdev_get_num_tc(dev) == tc)
12795 return 0;
12796
3ffb6a39
MC
12797 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12798 sh = true;
12799
98fdbe73
MC
12800 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12801 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
12802 if (rc)
12803 return rc;
c0c050c5
MC
12804
12805 /* Needs to close the device and do hw resource re-allocations */
12806 if (netif_running(bp->dev))
12807 bnxt_close_nic(bp, true, false);
12808
12809 if (tc) {
12810 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12811 netdev_set_num_tc(dev, tc);
12812 } else {
12813 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12814 netdev_reset_tc(dev);
12815 }
87e9b377 12816 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
12817 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12818 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
12819
12820 if (netif_running(bp->dev))
12821 return bnxt_open_nic(bp, true, false);
12822
12823 return 0;
12824}
12825
9e0fd15d
JP
12826static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12827 void *cb_priv)
c5e3deb8 12828{
9e0fd15d 12829 struct bnxt *bp = cb_priv;
de4784ca 12830
312324f1
JK
12831 if (!bnxt_tc_flower_enabled(bp) ||
12832 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 12833 return -EOPNOTSUPP;
c5e3deb8 12834
9e0fd15d
JP
12835 switch (type) {
12836 case TC_SETUP_CLSFLOWER:
12837 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12838 default:
12839 return -EOPNOTSUPP;
12840 }
12841}
12842
627c89d0 12843LIST_HEAD(bnxt_block_cb_list);
955bcb6e 12844
2ae7408f
SP
12845static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12846 void *type_data)
12847{
4e95bc26
PNA
12848 struct bnxt *bp = netdev_priv(dev);
12849
2ae7408f 12850 switch (type) {
9e0fd15d 12851 case TC_SETUP_BLOCK:
955bcb6e
PNA
12852 return flow_block_cb_setup_simple(type_data,
12853 &bnxt_block_cb_list,
4e95bc26
PNA
12854 bnxt_setup_tc_block_cb,
12855 bp, bp, true);
575ed7d3 12856 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
12857 struct tc_mqprio_qopt *mqprio = type_data;
12858
12859 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 12860
2ae7408f
SP
12861 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12862 }
12863 default:
12864 return -EOPNOTSUPP;
12865 }
c5e3deb8
MC
12866}
12867
c0c050c5
MC
12868#ifdef CONFIG_RFS_ACCEL
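/* Two ntuple filters match only if the protocol, IPv4/IPv6 addresses,
 * ports, flow dissector control flags and MAC addresses are all equal.
 */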
12869static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12870 struct bnxt_ntuple_filter *f2)
12871{
12872 struct flow_keys *keys1 = &f1->fkeys;
12873 struct flow_keys *keys2 = &f2->fkeys;
12874
6fc7caa8
MC
12875 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12876 keys1->basic.ip_proto != keys2->basic.ip_proto)
12877 return false;
12878
12879 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12880 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12881 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12882 return false;
12883 } else {
12884 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12885 sizeof(keys1->addrs.v6addrs.src)) ||
12886 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12887 sizeof(keys1->addrs.v6addrs.dst)))
12888 return false;
12889 }
12890
12891 if (keys1->ports.ports == keys2->ports.ports &&
61aad724 12892 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
12893 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12894 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
12895 return true;
12896
12897 return false;
12898}
12899
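/* aRFS flow steering handler: dissect the skb, return the sw_id of an
 * existing matching ntuple filter, or add a new filter and queue the
 * slow-path task to program it into hardware.
 */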
12900static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12901 u16 rxq_index, u32 flow_id)
12902{
12903 struct bnxt *bp = netdev_priv(dev);
12904 struct bnxt_ntuple_filter *fltr, *new_fltr;
12905 struct flow_keys *fkeys;
12906 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 12907 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5 12908 struct hlist_head *head;
f47d0e19 12909 u32 flags;
c0c050c5 12910
a54c4d74
MC
12911 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12912 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12913 int off = 0, j;
12914
12915 netif_addr_lock_bh(dev);
12916 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12917 if (ether_addr_equal(eth->h_dest,
12918 vnic->uc_list + off)) {
12919 l2_idx = j + 1;
12920 break;
12921 }
12922 }
12923 netif_addr_unlock_bh(dev);
12924 if (!l2_idx)
12925 return -EINVAL;
12926 }
c0c050c5
MC
12927 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12928 if (!new_fltr)
12929 return -ENOMEM;
12930
12931 fkeys = &new_fltr->fkeys;
12932 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12933 rc = -EPROTONOSUPPORT;
12934 goto err_free;
12935 }
12936
dda0e746
MC
12937 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12938 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
12939 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12940 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12941 rc = -EPROTONOSUPPORT;
12942 goto err_free;
12943 }
dda0e746
MC
12944 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12945 bp->hwrm_spec_code < 0x10601) {
12946 rc = -EPROTONOSUPPORT;
12947 goto err_free;
12948 }
f47d0e19
MC
12949 flags = fkeys->control.flags;
12950 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12951 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
61aad724
MC
12952 rc = -EPROTONOSUPPORT;
12953 goto err_free;
12954 }
c0c050c5 12955
a54c4d74 12956 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
12957 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12958
12959 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12960 head = &bp->ntp_fltr_hash_tbl[idx];
12961 rcu_read_lock();
12962 hlist_for_each_entry_rcu(fltr, head, hash) {
12963 if (bnxt_fltr_match(fltr, new_fltr)) {
02597d39 12964 rc = fltr->sw_id;
c0c050c5 12965 rcu_read_unlock();
c0c050c5
MC
12966 goto err_free;
12967 }
12968 }
12969 rcu_read_unlock();
12970
12971 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
12972 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12973 BNXT_NTP_FLTR_MAX_FLTR, 0);
12974 if (bit_id < 0) {
c0c050c5
MC
12975 spin_unlock_bh(&bp->ntp_fltr_lock);
12976 rc = -ENOMEM;
12977 goto err_free;
12978 }
12979
84e86b98 12980 new_fltr->sw_id = (u16)bit_id;
c0c050c5 12981 new_fltr->flow_id = flow_id;
a54c4d74 12982 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
12983 new_fltr->rxq = rxq_index;
12984 hlist_add_head_rcu(&new_fltr->hash, head);
12985 bp->ntp_fltr_count++;
12986 spin_unlock_bh(&bp->ntp_fltr_lock);
12987
9b1a00fd 12988 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
c0c050c5
MC
12989
12990 return new_fltr->sw_id;
12991
12992err_free:
12993 kfree(new_fltr);
12994 return rc;
12995}
12996
12997static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12998{
12999 int i;
13000
13001 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
13002 struct hlist_head *head;
13003 struct hlist_node *tmp;
13004 struct bnxt_ntuple_filter *fltr;
13005 int rc;
13006
13007 head = &bp->ntp_fltr_hash_tbl[i];
13008 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
13009 bool del = false;
13010
13011 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
13012 if (rps_may_expire_flow(bp->dev, fltr->rxq,
13013 fltr->flow_id,
13014 fltr->sw_id)) {
13015 bnxt_hwrm_cfa_ntuple_filter_free(bp,
13016 fltr);
13017 del = true;
13018 }
13019 } else {
13020 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
13021 fltr);
13022 if (rc)
13023 del = true;
13024 else
13025 set_bit(BNXT_FLTR_VALID, &fltr->state);
13026 }
13027
13028 if (del) {
13029 spin_lock_bh(&bp->ntp_fltr_lock);
13030 hlist_del_rcu(&fltr->hash);
13031 bp->ntp_fltr_count--;
13032 spin_unlock_bh(&bp->ntp_fltr_lock);
13033 synchronize_rcu();
13034 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
13035 kfree(fltr);
13036 }
13037 }
13038 }
19241368 13039 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9a005c38 13040 netdev_info(bp->dev, "Receive PF driver unload event!\n");
c0c050c5
MC
13041}
13042
13043#else
13044
13045static void bnxt_cfg_ntp_filters(struct bnxt *bp)
13046{
13047}
13048
13049#endif /* CONFIG_RFS_ACCEL */
13050
1eb4ef12
SK
13051static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
13052 unsigned int entry, struct udp_tunnel_info *ti)
c0c050c5 13053{
442a35a5 13054 struct bnxt *bp = netdev_priv(netdev);
442a35a5 13055 unsigned int cmd;
c0c050c5 13056
1eb4ef12 13057 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
442a35a5 13058 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
7ae9dc35 13059 else
442a35a5 13060 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
7cdd5fc3 13061
1eb4ef12
SK
13062 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
13063}
13064
13065static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
13066 unsigned int entry, struct udp_tunnel_info *ti)
13067{
13068 struct bnxt *bp = netdev_priv(netdev);
13069 unsigned int cmd;
13070
13071 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
13072 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
13073 else
13074 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
ad51b8e9 13075
442a35a5 13076 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
c0c050c5
MC
13077}
13078
442a35a5 13079static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
1eb4ef12
SK
13080 .set_port = bnxt_udp_tunnel_set_port,
13081 .unset_port = bnxt_udp_tunnel_unset_port,
442a35a5
JK
13082 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
13083 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
13084 .tables = {
13085 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
13086 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
13087 },
13088};
c0c050c5 13089
39d8ba2e
MC
13090static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13091 struct net_device *dev, u32 filter_mask,
13092 int nlflags)
13093{
13094 struct bnxt *bp = netdev_priv(dev);
13095
13096 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
13097 nlflags, filter_mask, NULL);
13098}
13099
13100static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 13101 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
13102{
13103 struct bnxt *bp = netdev_priv(dev);
13104 struct nlattr *attr, *br_spec;
13105 int rem, rc = 0;
13106
13107 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
13108 return -EOPNOTSUPP;
13109
13110 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13111 if (!br_spec)
13112 return -EINVAL;
13113
13114 nla_for_each_nested(attr, br_spec, rem) {
13115 u16 mode;
13116
13117 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13118 continue;
13119
39d8ba2e
MC
13120 mode = nla_get_u16(attr);
13121 if (mode == bp->br_mode)
13122 break;
13123
13124 rc = bnxt_hwrm_set_br_mode(bp, mode);
13125 if (!rc)
13126 bp->br_mode = mode;
13127 break;
13128 }
13129 return rc;
13130}
13131
52d5254a
FF
13132int bnxt_get_port_parent_id(struct net_device *dev,
13133 struct netdev_phys_item_id *ppid)
c124a62f 13134{
52d5254a
FF
13135 struct bnxt *bp = netdev_priv(dev);
13136
c124a62f
SP
13137 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
13138 return -EOPNOTSUPP;
13139
 13140	/* The PF and its VF-reps only support the switchdev framework */
d061b241 13141 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
c124a62f
SP
13142 return -EOPNOTSUPP;
13143
b014232f
VV
13144 ppid->id_len = sizeof(bp->dsn);
13145 memcpy(ppid->id, bp->dsn, ppid->id_len);
c124a62f 13146
52d5254a 13147 return 0;
c124a62f
SP
13148}
13149
c0c050c5
MC
13150static const struct net_device_ops bnxt_netdev_ops = {
13151 .ndo_open = bnxt_open,
13152 .ndo_start_xmit = bnxt_start_xmit,
13153 .ndo_stop = bnxt_close,
13154 .ndo_get_stats64 = bnxt_get_stats64,
13155 .ndo_set_rx_mode = bnxt_set_rx_mode,
a7605370 13156 .ndo_eth_ioctl = bnxt_ioctl,
c0c050c5
MC
13157 .ndo_validate_addr = eth_validate_addr,
13158 .ndo_set_mac_address = bnxt_change_mac_addr,
13159 .ndo_change_mtu = bnxt_change_mtu,
13160 .ndo_fix_features = bnxt_fix_features,
13161 .ndo_set_features = bnxt_set_features,
1698d600 13162 .ndo_features_check = bnxt_features_check,
c0c050c5
MC
13163 .ndo_tx_timeout = bnxt_tx_timeout,
13164#ifdef CONFIG_BNXT_SRIOV
13165 .ndo_get_vf_config = bnxt_get_vf_config,
13166 .ndo_set_vf_mac = bnxt_set_vf_mac,
13167 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
13168 .ndo_set_vf_rate = bnxt_set_vf_bw,
13169 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
13170 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 13171 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
13172#endif
13173 .ndo_setup_tc = bnxt_setup_tc,
13174#ifdef CONFIG_RFS_ACCEL
13175 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
13176#endif
f4e63525 13177 .ndo_bpf = bnxt_xdp,
f18c2b77 13178 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
13179 .ndo_bridge_getlink = bnxt_bridge_getlink,
13180 .ndo_bridge_setlink = bnxt_bridge_setlink,
c0c050c5
MC
13181};
13182
13183static void bnxt_remove_one(struct pci_dev *pdev)
13184{
13185 struct net_device *dev = pci_get_drvdata(pdev);
13186 struct bnxt *bp = netdev_priv(dev);
13187
7e334fc8 13188 if (BNXT_PF(bp))
c0c050c5
MC
13189 bnxt_sriov_disable(bp);
13190
d80d88b0
AK
13191 bnxt_rdma_aux_device_uninit(bp);
13192
a521c8a0 13193 bnxt_ptp_clear(bp);
21d6a11e 13194 unregister_netdev(dev);
b16939b5 13195 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
21d6a11e 13196 /* Flush any pending tasks */
631ce27a
VV
13197 cancel_work_sync(&bp->sp_task);
13198 cancel_delayed_work_sync(&bp->fw_reset_task);
b16939b5
VV
13199 bp->sp_event = 0;
13200
f16a9169 13201 bnxt_dl_fw_reporters_destroy(bp);
cda2cab0 13202 bnxt_dl_unregister(bp);
2ae7408f 13203 bnxt_shutdown_tc(bp);
c0c050c5 13204
7809592d 13205 bnxt_clear_int_mode(bp);
be58a0da 13206 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 13207 bnxt_free_hwrm_resources(bp);
eb513658 13208 bnxt_ethtool_free(bp);
7df4ae9f 13209 bnxt_dcb_free(bp);
ae5c42f0
MC
13210 kfree(bp->ptp_cfg);
13211 bp->ptp_cfg = NULL;
8280b38e
VV
13212 kfree(bp->fw_health);
13213 bp->fw_health = NULL;
c20dc142 13214 bnxt_cleanup_pci(bp);
98f04cf0
MC
13215 bnxt_free_ctx_mem(bp);
13216 kfree(bp->ctx);
13217 bp->ctx = NULL;
1667cbf6
MC
13218 kfree(bp->rss_indir_tbl);
13219 bp->rss_indir_tbl = NULL;
fd3ab1c7 13220 bnxt_free_port_stats(bp);
c0c050c5 13221 free_netdev(dev);
c0c050c5
MC
13222}
13223
ba642ab7 13224static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
c0c050c5
MC
13225{
13226 int rc = 0;
13227 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 13228
b0d28207 13229 bp->phy_flags = 0;
170ce013
MC
13230 rc = bnxt_hwrm_phy_qcaps(bp);
13231 if (rc) {
13232 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13233 rc);
13234 return rc;
13235 }
dade5e15
MC
13236 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13237 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13238 else
13239 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
43a5107d
MC
13240 if (!fw_dflt)
13241 return 0;
13242
3c10ed49 13243 mutex_lock(&bp->link_lock);
c0c050c5
MC
13244 rc = bnxt_update_link(bp, false);
13245 if (rc) {
3c10ed49 13246 mutex_unlock(&bp->link_lock);
c0c050c5
MC
13247 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13248 rc);
13249 return rc;
13250 }
13251
93ed8117
MC
13252 /* Older firmware does not have supported_auto_speeds, so assume
13253 * that all supported speeds can be autonegotiated.
13254 */
13255 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
13256 link_info->support_auto_speeds = link_info->support_speeds;
13257
8119e49b 13258 bnxt_init_ethtool_link_settings(bp);
3c10ed49 13259 mutex_unlock(&bp->link_lock);
ba642ab7 13260 return 0;
c0c050c5
MC
13261}
13262
13263static int bnxt_get_max_irq(struct pci_dev *pdev)
13264{
13265 u16 ctrl;
13266
13267 if (!pdev->msix_cap)
13268 return 1;
13269
13270 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
13271 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
13272}
13273
6e6c5a57
MC
13274static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13275 int *max_cp)
c0c050c5 13276{
6a4f2947 13277 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 13278 int max_ring_grps = 0, max_irq;
c0c050c5 13279
6a4f2947
MC
13280 *max_tx = hw_resc->max_tx_rings;
13281 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
13282 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13283 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13284 bnxt_get_ulp_msix_num(bp),
c027c6b4 13285 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
13286 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13287 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 13288 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
13289 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13290 *max_cp -= 1;
13291 *max_rx -= 2;
13292 }
c0c050c5
MC
13293 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13294 *max_rx >>= 1;
e30fbc33
MC
13295 if (bp->flags & BNXT_FLAG_CHIP_P5) {
13296 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13297 /* On P5 chips, max_cp output param should be available NQs */
13298 *max_cp = max_irq;
13299 }
b72d4a68 13300 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
13301}
13302
13303int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13304{
13305 int rx, tx, cp;
13306
13307 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
13308 *max_rx = rx;
13309 *max_tx = tx;
6e6c5a57
MC
13310 if (!rx || !tx || !cp)
13311 return -ENOMEM;
13312
6e6c5a57
MC
13313 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
13314}
13315
e4060d30
MC
13316static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13317 bool shared)
13318{
13319 int rc;
13320
13321 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
13322 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13323 /* Not enough rings, try disabling agg rings. */
13324 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13325 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
13326 if (rc) {
13327 /* set BNXT_FLAG_AGG_RINGS back for consistency */
13328 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 13329 return rc;
07f4fde5 13330 }
bdbd1eb5 13331 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
13332 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13333 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
13334 bnxt_set_ring_params(bp);
13335 }
e4060d30
MC
13336
13337 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13338 int max_cp, max_stat, max_irq;
13339
13340 /* Reserve minimum resources for RoCE */
13341 max_cp = bnxt_get_max_func_cp_rings(bp);
13342 max_stat = bnxt_get_max_func_stat_ctxs(bp);
13343 max_irq = bnxt_get_max_func_irqs(bp);
13344 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
13345 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13346 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13347 return 0;
13348
13349 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13350 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13351 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13352 max_cp = min_t(int, max_cp, max_irq);
13353 max_cp = min_t(int, max_cp, max_stat);
13354 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13355 if (rc)
13356 rc = 0;
13357 }
13358 return rc;
13359}
13360
58ea801a
MC
 13361/* In the initial default shared ring setting, each shared ring must have
 13362 * an RX/TX ring pair.
13363 */
13364static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13365{
13366 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13367 bp->rx_nr_rings = bp->cp_nr_rings;
13368 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13369 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13370}
13371
702c221c 13372static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
13373{
13374 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 13375
2773dfb2
MC
13376 if (!bnxt_can_reserve_rings(bp))
13377 return 0;
13378
6e6c5a57
MC
13379 if (sh)
13380 bp->flags |= BNXT_FLAG_SHARED_RINGS;
d629522e 13381 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
13382 /* Reduce default rings on multi-port cards so that total default
13383 * rings do not exceed CPU count.
13384 */
13385 if (bp->port_count > 1) {
13386 int max_rings =
13387 max_t(int, num_online_cpus() / bp->port_count, 1);
13388
13389 dflt_rings = min_t(int, dflt_rings, max_rings);
13390 }
e4060d30 13391 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
13392 if (rc)
13393 return rc;
13394 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13395 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
13396 if (sh)
13397 bnxt_trim_dflt_sh_rings(bp);
13398 else
13399 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13400 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 13401
674f50a5 13402 rc = __bnxt_reserve_rings(bp);
662c9b22 13403 if (rc && rc != -ENODEV)
391be5c2 13404 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
13405 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13406 if (sh)
13407 bnxt_trim_dflt_sh_rings(bp);
391be5c2 13408
674f50a5
MC
13409 /* Rings may have been trimmed, re-reserve the trimmed rings. */
13410 if (bnxt_need_reserve_rings(bp)) {
13411 rc = __bnxt_reserve_rings(bp);
662c9b22 13412 if (rc && rc != -ENODEV)
674f50a5
MC
13413 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13414 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13415 }
76595193
PS
13416 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13417 bp->rx_nr_rings++;
13418 bp->cp_nr_rings++;
13419 }
5d765a5e
VV
13420 if (rc) {
13421 bp->tx_nr_rings = 0;
13422 bp->rx_nr_rings = 0;
13423 }
6e6c5a57 13424 return rc;
c0c050c5
MC
13425}
13426
47558acd
MC
13427static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13428{
13429 int rc;
13430
13431 if (bp->tx_nr_rings)
13432 return 0;
13433
6b95c3e9
MC
13434 bnxt_ulp_irq_stop(bp);
13435 bnxt_clear_int_mode(bp);
47558acd
MC
13436 rc = bnxt_set_dflt_rings(bp, true);
13437 if (rc) {
662c9b22
EP
13438 if (BNXT_VF(bp) && rc == -ENODEV)
13439 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13440 else
13441 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 13442 goto init_dflt_ring_err;
47558acd
MC
13443 }
13444 rc = bnxt_init_int_mode(bp);
13445 if (rc)
6b95c3e9
MC
13446 goto init_dflt_ring_err;
13447
47558acd 13448 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13ba7943
SK
13449
13450 bnxt_set_dflt_rfs(bp);
13451
6b95c3e9
MC
13452init_dflt_ring_err:
13453 bnxt_ulp_irq_restart(bp, rc);
13454 return rc;
47558acd
MC
13455}
13456
80fcaf46 13457int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 13458{
80fcaf46
MC
13459 int rc;
13460
7b08f661
MC
13461 ASSERT_RTNL();
13462 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
13463
13464 if (netif_running(bp->dev))
13465 __bnxt_close_nic(bp, true, false);
13466
ec86f14e 13467 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
13468 bnxt_clear_int_mode(bp);
13469 rc = bnxt_init_int_mode(bp);
ec86f14e 13470 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
13471
13472 if (netif_running(bp->dev)) {
13473 if (rc)
13474 dev_close(bp->dev);
13475 else
13476 rc = bnxt_open_nic(bp, true, false);
13477 }
13478
80fcaf46 13479 return rc;
7b08f661
MC
13480}
13481
a22a6ac2
MC
13482static int bnxt_init_mac_addr(struct bnxt *bp)
13483{
13484 int rc = 0;
13485
13486 if (BNXT_PF(bp)) {
a96d317f 13487 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
a22a6ac2
MC
13488 } else {
13489#ifdef CONFIG_BNXT_SRIOV
13490 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 13491 bool strict_approval = true;
a22a6ac2
MC
13492
13493 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 13494 /* overwrite netdev dev_addr with admin VF MAC */
a96d317f 13495 eth_hw_addr_set(bp->dev, vf->mac_addr);
28ea334b
MC
13496 /* Older PF driver or firmware may not approve this
13497 * correctly.
13498 */
13499 strict_approval = false;
a22a6ac2
MC
13500 } else {
13501 eth_hw_addr_random(bp->dev);
a22a6ac2 13502 }
28ea334b 13503 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
13504#endif
13505 }
13506 return rc;
13507}
13508
a0d0fd70
VV
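/* Read the board part number and serial number from the PCI VPD. */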
13509static void bnxt_vpd_read_info(struct bnxt *bp)
13510{
13511 struct pci_dev *pdev = bp->pdev;
0ff25f6a
HK
13512 unsigned int vpd_size, kw_len;
13513 int pos, size;
a0d0fd70
VV
13514 u8 *vpd_data;
13515
550cd7c1
HK
13516 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13517 if (IS_ERR(vpd_data)) {
13518 pci_warn(pdev, "Unable to read VPD\n");
a0d0fd70 13519 return;
4fd13157
DM
13520 }
13521
0ff25f6a
HK
13522 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13523 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
a0d0fd70
VV
13524 if (pos < 0)
13525 goto read_sn;
13526
0ff25f6a 13527 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
492adcf4 13528 memcpy(bp->board_partno, &vpd_data[pos], size);
a0d0fd70
VV
13529
13530read_sn:
0ff25f6a
HK
13531 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13532 PCI_VPD_RO_KEYWORD_SERIALNO,
13533 &kw_len);
a0d0fd70
VV
13534 if (pos < 0)
13535 goto exit;
13536
0ff25f6a 13537 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
492adcf4 13538 memcpy(bp->board_serialno, &vpd_data[pos], size);
a0d0fd70
VV
13539exit:
13540 kfree(vpd_data);
13541}
13542
03213a99
JP
13543static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13544{
13545 struct pci_dev *pdev = bp->pdev;
8d85b75b 13546 u64 qword;
03213a99 13547
8d85b75b
JK
13548 qword = pci_get_dsn(pdev);
13549 if (!qword) {
13550 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
03213a99
JP
13551 return -EOPNOTSUPP;
13552 }
13553
8d85b75b
JK
13554 put_unaligned_le64(qword, dsn);
13555
d061b241 13556 bp->flags |= BNXT_FLAG_DSN_VALID;
03213a99
JP
13557 return 0;
13558}
13559
8ae24738
MC
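/* Map the doorbell BAR (BAR 2) using the size determined during
 * bnxt_fw_init_one_p2().
 */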
13560static int bnxt_map_db_bar(struct bnxt *bp)
13561{
13562 if (!bp->db_size)
13563 return -ENODEV;
13564 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13565 if (!bp->bar1)
13566 return -ENOMEM;
13567 return 0;
13568}
13569
c7dd4a5b
EP
13570void bnxt_print_device_info(struct bnxt *bp)
13571{
13572 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13573 board_info[bp->board_idx].name,
13574 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13575
13576 pcie_print_link_status(bp->pdev);
13577}
13578
c0c050c5
MC
13579static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13580{
c0c050c5
MC
13581 struct net_device *dev;
13582 struct bnxt *bp;
6e6c5a57 13583 int rc, max_irqs;
c0c050c5 13584
4e00338a 13585 if (pci_is_bridge(pdev))
fa853dda
PS
13586 return -ENODEV;
13587
8743db4a
VV
13588	/* Clear any DMA transactions still pending from the crashed
13589	 * kernel while loading the driver in the capture (kdump) kernel.
13590	 */
13591 if (is_kdump_kernel()) {
13592 pci_clear_master(pdev);
13593 pcie_flr(pdev);
13594 }
13595
c0c050c5
MC
13596 max_irqs = bnxt_get_max_irq(pdev);
13597 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13598 if (!dev)
13599 return -ENOMEM;
13600
13601 bp = netdev_priv(dev);
c7dd4a5b 13602 bp->board_idx = ent->driver_data;
8fb35cd3 13603 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
9c1fabdf 13604 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5 13605
c7dd4a5b 13606 if (bnxt_vf_pciid(bp->board_idx))
c0c050c5
MC
13607 bp->flags |= BNXT_FLAG_VF;
13608
0020ae2a
VG
13609 /* No devlink port registration in case of a VF */
13610 if (BNXT_PF(bp))
13611 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
13612
2bcfa6f6 13613 if (pdev->msix_cap)
c0c050c5 13614 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
13615
13616 rc = bnxt_init_board(pdev, dev);
13617 if (rc < 0)
13618 goto init_err_free;
13619
13620 dev->netdev_ops = &bnxt_netdev_ops;
13621 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13622 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
13623 pci_set_drvdata(pdev, dev);
13624
3e8060fa
PS
13625 rc = bnxt_alloc_hwrm_resources(bp);
13626 if (rc)
17086399 13627 goto init_err_pci_clean;
3e8060fa
PS
13628
13629 mutex_init(&bp->hwrm_cmd_lock);
ba642ab7 13630 mutex_init(&bp->link_lock);
7c380918
MC
13631
13632 rc = bnxt_fw_init_one_p1(bp);
3e8060fa 13633 if (rc)
17086399 13634 goto init_err_pci_clean;
3e8060fa 13635
3e3c09b0
VV
13636 if (BNXT_PF(bp))
13637 bnxt_vpd_read_info(bp);
13638
9d6b648c 13639 if (BNXT_CHIP_P5(bp)) {
e38287b7 13640 bp->flags |= BNXT_FLAG_CHIP_P5;
9d6b648c
MC
13641 if (BNXT_CHIP_SR2(bp))
13642 bp->flags |= BNXT_FLAG_CHIP_SR2;
13643 }
e38287b7 13644
5fa65524
EP
13645 rc = bnxt_alloc_rss_indir_tbl(bp);
13646 if (rc)
13647 goto init_err_pci_clean;
13648
7c380918 13649 rc = bnxt_fw_init_one_p2(bp);
3c2217a6
MC
13650 if (rc)
13651 goto init_err_pci_clean;
13652
8ae24738
MC
13653 rc = bnxt_map_db_bar(bp);
13654 if (rc) {
13655 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13656 rc);
13657 goto init_err_pci_clean;
13658 }
13659
c0c050c5
MC
13660 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13661 NETIF_F_TSO | NETIF_F_TSO6 |
13662 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 13663 NETIF_F_GSO_IPXIP4 |
152971ee
AD
13664 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13665 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
13666 NETIF_F_RXCSUM | NETIF_F_GRO;
13667
e38287b7 13668 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 13669 dev->hw_features |= NETIF_F_LRO;
c0c050c5 13670
c0c050c5
MC
13671 dev->hw_enc_features =
13672 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13673 NETIF_F_TSO | NETIF_F_TSO6 |
13674 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 13675 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 13676 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
442a35a5
JK
13677 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13678
152971ee
AD
13679 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13680 NETIF_F_GSO_GRE_CSUM;
c0c050c5 13681 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
1da63ddd
EP
13682 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13683 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13684 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13685 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
e38287b7 13686 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 13687 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 13688 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
13689 if (dev->features & NETIF_F_GRO_HW)
13690 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
13691 dev->priv_flags |= IFF_UNICAST_FLT;
13692
b6488b16
CL
13693 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
13694
66c0e13a
MM
13695 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
13696 NETDEV_XDP_ACT_RX_SG;
13697
c0c050c5
MC
13698#ifdef CONFIG_BNXT_SRIOV
13699 init_waitqueue_head(&bp->sriov_cfg_wait);
13700#endif
e38287b7
MC
13701 if (BNXT_SUPPORTS_TPA(bp)) {
13702 bp->gro_func = bnxt_gro_func_5730x;
67912c36 13703 if (BNXT_CHIP_P4(bp))
e38287b7 13704 bp->gro_func = bnxt_gro_func_5731x;
67912c36
MC
13705 else if (BNXT_CHIP_P5(bp))
13706 bp->gro_func = bnxt_gro_func_5750x;
e38287b7
MC
13707 }
13708 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 13709 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 13710
a22a6ac2
MC
13711 rc = bnxt_init_mac_addr(bp);
13712 if (rc) {
13713 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13714 rc = -EADDRNOTAVAIL;
13715 goto init_err_pci_clean;
13716 }
c0c050c5 13717
2e9217d1
VV
13718 if (BNXT_PF(bp)) {
13719 /* Read the adapter's DSN to use as the eswitch switch_id */
b014232f 13720 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
2e9217d1 13721 }
567b2abe 13722
7eb9bb3a
MC
13723 /* MTU range: 60 - FW defined max */
13724 dev->min_mtu = ETH_ZLEN;
13725 dev->max_mtu = bp->max_mtu;
13726
ba642ab7 13727 rc = bnxt_probe_phy(bp, true);
d5430d31
MC
13728 if (rc)
13729 goto init_err_pci_clean;
13730
c61fb99c 13731 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
13732 bnxt_set_tpa_flags(bp);
13733 bnxt_set_ring_params(bp);
702c221c 13734 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5 13735 if (rc) {
662c9b22
EP
13736 if (BNXT_VF(bp) && rc == -ENODEV) {
13737 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13738 } else {
13739 netdev_err(bp->dev, "Not enough rings available.\n");
13740 rc = -ENOMEM;
13741 }
17086399 13742 goto init_err_pci_clean;
bdbd1eb5 13743 }
c0c050c5 13744
ba642ab7 13745 bnxt_fw_init_one_p3(bp);
2bcfa6f6 13746
df78ea22
MC
13747 bnxt_init_dflt_coal(bp);
13748
a196e96b 13749 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
13750 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13751
7809592d 13752 rc = bnxt_init_int_mode(bp);
c0c050c5 13753 if (rc)
17086399 13754 goto init_err_pci_clean;
c0c050c5 13755
832aed16
MC
13756 /* No TC has been set yet and rings may have been trimmed due to
13757 * limited MSIX, so we re-initialize the TX rings per TC.
13758 */
13759 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13760
c213eae8
MC
13761 if (BNXT_PF(bp)) {
13762 if (!bnxt_pf_wq) {
13763 bnxt_pf_wq =
13764 create_singlethread_workqueue("bnxt_pf_wq");
13765 if (!bnxt_pf_wq) {
13766 dev_err(&pdev->dev, "Unable to create workqueue.\n");
b5f796b6 13767 rc = -ENOMEM;
c213eae8
MC
13768 goto init_err_pci_clean;
13769 }
13770 }
18c7015c
JK
13771 rc = bnxt_init_tc(bp);
13772 if (rc)
13773 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13774 rc);
c213eae8 13775 }
2ae7408f 13776
190eda1a 13777 bnxt_inv_fw_health_reg(bp);
e624c70e
LR
13778 rc = bnxt_dl_register(bp);
13779 if (rc)
13780 goto init_err_dl;
cda2cab0 13781
7809592d
MC
13782 rc = register_netdev(dev);
13783 if (rc)
cda2cab0 13784 goto init_err_cleanup;
7809592d 13785
7e334fc8 13786 bnxt_dl_fw_reporters_create(bp);
4ab0c6a8 13787
d80d88b0
AK
13788 bnxt_rdma_aux_device_init(bp);
13789
c7dd4a5b 13790 bnxt_print_device_info(bp);
90c4f788 13791
df3875ec 13792 pci_save_state(pdev);
c0c050c5 13793
d80d88b0 13794 return 0;
cda2cab0
VV
13795init_err_cleanup:
13796 bnxt_dl_unregister(bp);
e624c70e 13797init_err_dl:
2ae7408f 13798 bnxt_shutdown_tc(bp);
7809592d
MC
13799 bnxt_clear_int_mode(bp);
13800
17086399 13801init_err_pci_clean:
bdb38602 13802 bnxt_hwrm_func_drv_unrgtr(bp);
a2bf74f4 13803 bnxt_free_hwrm_resources(bp);
03400aaa 13804 bnxt_ethtool_free(bp);
a521c8a0 13805 bnxt_ptp_clear(bp);
ae5c42f0
MC
13806 kfree(bp->ptp_cfg);
13807 bp->ptp_cfg = NULL;
07f83d72
MC
13808 kfree(bp->fw_health);
13809 bp->fw_health = NULL;
17086399 13810 bnxt_cleanup_pci(bp);
62bfb932
MC
13811 bnxt_free_ctx_mem(bp);
13812 kfree(bp->ctx);
13813 bp->ctx = NULL;
1667cbf6
MC
13814 kfree(bp->rss_indir_tbl);
13815 bp->rss_indir_tbl = NULL;
c0c050c5
MC
13816
13817init_err_free:
13818 free_netdev(dev);
13819 return rc;
13820}
13821
d196ece7
MC
13822static void bnxt_shutdown(struct pci_dev *pdev)
13823{
13824 struct net_device *dev = pci_get_drvdata(pdev);
13825 struct bnxt *bp;
13826
13827 if (!dev)
13828 return;
13829
13830 rtnl_lock();
13831 bp = netdev_priv(dev);
13832 if (!bp)
13833 goto shutdown_exit;
13834
13835 if (netif_running(dev))
13836 dev_close(dev);
13837
5567ae4a
VV
13838 bnxt_clear_int_mode(bp);
13839 pci_disable_device(pdev);
a7f3f939 13840
d196ece7 13841 if (system_state == SYSTEM_POWER_OFF) {
d196ece7
MC
13842 pci_wake_from_d3(pdev, bp->wol);
13843 pci_set_power_state(pdev, PCI_D3hot);
13844 }
13845
13846shutdown_exit:
13847 rtnl_unlock();
13848}
13849
f65a2044
MC
13850#ifdef CONFIG_PM_SLEEP
13851static int bnxt_suspend(struct device *device)
13852{
f521eaa9 13853 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13854 struct bnxt *bp = netdev_priv(dev);
13855 int rc = 0;
13856
13857 rtnl_lock();
6a68749d 13858 bnxt_ulp_stop(bp);
f65a2044
MC
13859 if (netif_running(dev)) {
13860 netif_device_detach(dev);
13861 rc = bnxt_close(dev);
13862 }
13863 bnxt_hwrm_func_drv_unrgtr(bp);
ef02af8c 13864 pci_disable_device(bp->pdev);
f9b69d7f
VV
13865 bnxt_free_ctx_mem(bp);
13866 kfree(bp->ctx);
13867 bp->ctx = NULL;
f65a2044
MC
13868 rtnl_unlock();
13869 return rc;
13870}
13871
13872static int bnxt_resume(struct device *device)
13873{
f521eaa9 13874 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13875 struct bnxt *bp = netdev_priv(dev);
13876 int rc = 0;
13877
13878 rtnl_lock();
ef02af8c
MC
13879 rc = pci_enable_device(bp->pdev);
13880 if (rc) {
13881 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13882 rc);
13883 goto resume_exit;
13884 }
13885 pci_set_master(bp->pdev);
f92335d8 13886 if (bnxt_hwrm_ver_get(bp)) {
f65a2044
MC
13887 rc = -ENODEV;
13888 goto resume_exit;
13889 }
13890 rc = bnxt_hwrm_func_reset(bp);
13891 if (rc) {
13892 rc = -EBUSY;
13893 goto resume_exit;
13894 }
f92335d8 13895
2084ccf6
MC
13896 rc = bnxt_hwrm_func_qcaps(bp);
13897 if (rc)
f9b69d7f 13898 goto resume_exit;
f92335d8
VV
13899
13900 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13901 rc = -ENODEV;
13902 goto resume_exit;
13903 }
13904
f65a2044
MC
13905 bnxt_get_wol_settings(bp);
13906 if (netif_running(dev)) {
13907 rc = bnxt_open(dev);
13908 if (!rc)
13909 netif_device_attach(dev);
13910 }
13911
13912resume_exit:
6a68749d 13913 bnxt_ulp_start(bp, rc);
59ae2101
MC
13914 if (!rc)
13915 bnxt_reenable_sriov(bp);
f65a2044
MC
13916 rtnl_unlock();
13917 return rc;
13918}
13919
13920static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13921#define BNXT_PM_OPS (&bnxt_pm_ops)
13922
13923#else
13924
13925#define BNXT_PM_OPS NULL
13926
13927#endif /* CONFIG_PM_SLEEP */
13928
6316ea6d
SB
13929/**
13930 * bnxt_io_error_detected - called when PCI error is detected
13931 * @pdev: Pointer to PCI device
13932 * @state: The current pci connection state
13933 *
13934 * This function is called after a PCI bus error affecting
13935 * this device has been detected.
13936 */
13937static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13938 pci_channel_state_t state)
13939{
13940 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 13941 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
13942
13943 netdev_info(netdev, "PCI I/O error detected\n");
13944
13945 rtnl_lock();
13946 netif_device_detach(netdev);
13947
a588e458
MC
13948 bnxt_ulp_stop(bp);
13949
6316ea6d
SB
13950 if (state == pci_channel_io_perm_failure) {
13951 rtnl_unlock();
13952 return PCI_ERS_RESULT_DISCONNECT;
13953 }
13954
f75d9a0a
VV
13955 if (state == pci_channel_io_frozen)
13956 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13957
6316ea6d
SB
13958 if (netif_running(netdev))
13959 bnxt_close(netdev);
13960
c81cfb62
KA
13961 if (pci_is_enabled(pdev))
13962 pci_disable_device(pdev);
6e2f8388
MC
13963 bnxt_free_ctx_mem(bp);
13964 kfree(bp->ctx);
13965 bp->ctx = NULL;
6316ea6d
SB
13966 rtnl_unlock();
13967
13968	/* Request a slot reset. */
13969 return PCI_ERS_RESULT_NEED_RESET;
13970}
13971
13972/**
13973 * bnxt_io_slot_reset - called after the pci bus has been reset.
13974 * @pdev: Pointer to PCI device
13975 *
13976 * Restart the card from scratch, as if from a cold boot.
13977 * At this point, the card has experienced a hard reset,
13978 * followed by fixups by the BIOS, and has its config space
13979 * set up identically to what it was at cold boot.
13980 */
13981static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13982{
fb1e6e56 13983 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
6316ea6d
SB
13984 struct net_device *netdev = pci_get_drvdata(pdev);
13985 struct bnxt *bp = netdev_priv(netdev);
0cf736a1
VG
13986 int retry = 0;
13987 int err = 0;
13988 int off;
6316ea6d
SB
13989
13990 netdev_info(bp->dev, "PCI Slot Reset\n");
13991
13992 rtnl_lock();
13993
13994 if (pci_enable_device(pdev)) {
13995 dev_err(&pdev->dev,
13996 "Cannot re-enable PCI device after reset.\n");
13997 } else {
13998 pci_set_master(pdev);
f75d9a0a
VV
13999		/* Upon a fatal error, the device's internal logic that latches
14000		 * the BAR values is reset and is restored only by rewriting
14001		 * the BARs.
14002		 *
14003		 * As pci_restore_state() does not rewrite a BAR whose value
14004		 * matches the previously saved value, the driver writes the
14005		 * BARs to 0 to force the restore after a fatal error.
14006		 */
14007 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
14008 &bp->state)) {
14009 for (off = PCI_BASE_ADDRESS_0;
14010 off <= PCI_BASE_ADDRESS_5; off += 4)
14011 pci_write_config_dword(bp->pdev, off, 0);
14012 }
df3875ec
VV
14013 pci_restore_state(pdev);
14014 pci_save_state(pdev);
6316ea6d 14015
0cf736a1
VG
14016 bnxt_inv_fw_health_reg(bp);
14017 bnxt_try_map_fw_health_reg(bp);
14018
14019		/* In some PCIe AER scenarios, firmware may take up to
14020		 * 10 seconds to become ready.
14021		 */
14022 do {
14023 err = bnxt_try_recover_fw(bp);
14024 if (!err)
14025 break;
14026 retry++;
14027 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
14028
14029 if (err) {
14030 dev_err(&pdev->dev, "Firmware not ready\n");
14031 goto reset_exit;
14032 }
14033
aa8ed021 14034 err = bnxt_hwrm_func_reset(bp);
fb1e6e56 14035 if (!err)
6e2f8388 14036 result = PCI_ERS_RESULT_RECOVERED;
0cf736a1
VG
14037
14038 bnxt_ulp_irq_stop(bp);
14039 bnxt_clear_int_mode(bp);
14040 err = bnxt_init_int_mode(bp);
14041 bnxt_ulp_irq_restart(bp, err);
bae361c5 14042 }
6316ea6d 14043
0cf736a1
VG
14044reset_exit:
14045 bnxt_clear_reservations(bp, true);
6316ea6d
SB
14046 rtnl_unlock();
14047
bae361c5 14048 return result;
6316ea6d
SB
14049}
14050
14051/**
14052 * bnxt_io_resume - called when traffic can start flowing again.
14053 * @pdev: Pointer to PCI device
14054 *
14055 * This callback is called when the error recovery driver tells
14056 * us that it's OK to resume normal operation.
14057 */
14058static void bnxt_io_resume(struct pci_dev *pdev)
14059{
14060 struct net_device *netdev = pci_get_drvdata(pdev);
fb1e6e56
VV
14061 struct bnxt *bp = netdev_priv(netdev);
14062 int err;
6316ea6d 14063
fb1e6e56 14064 netdev_info(bp->dev, "PCI Slot Resume\n");
6316ea6d
SB
14065 rtnl_lock();
14066
fb1e6e56
VV
14067 err = bnxt_hwrm_func_qcaps(bp);
14068 if (!err && netif_running(netdev))
14069 err = bnxt_open(netdev);
14070
14071 bnxt_ulp_start(bp, err);
14072 if (!err) {
14073 bnxt_reenable_sriov(bp);
14074 netif_device_attach(netdev);
14075 }
6316ea6d
SB
14076
14077 rtnl_unlock();
14078}
14079
14080static const struct pci_error_handlers bnxt_err_handler = {
14081 .error_detected = bnxt_io_error_detected,
14082 .slot_reset = bnxt_io_slot_reset,
14083 .resume = bnxt_io_resume
14084};
14085
c0c050c5
MC
14086static struct pci_driver bnxt_pci_driver = {
14087 .name = DRV_MODULE_NAME,
14088 .id_table = bnxt_pci_tbl,
14089 .probe = bnxt_init_one,
14090 .remove = bnxt_remove_one,
d196ece7 14091 .shutdown = bnxt_shutdown,
f65a2044 14092 .driver.pm = BNXT_PM_OPS,
6316ea6d 14093 .err_handler = &bnxt_err_handler,
c0c050c5
MC
14094#if defined(CONFIG_BNXT_SRIOV)
14095 .sriov_configure = bnxt_sriov_configure,
14096#endif
14097};
14098
c213eae8
MC
14099static int __init bnxt_init(void)
14100{
991aef4e
GC
14101 int err;
14102
cabfb09d 14103 bnxt_debug_init();
991aef4e
GC
14104 err = pci_register_driver(&bnxt_pci_driver);
14105 if (err) {
14106 bnxt_debug_exit();
14107 return err;
14108 }
14109
14110 return 0;
c213eae8
MC
14111}
14112
14113static void __exit bnxt_exit(void)
14114{
14115 pci_unregister_driver(&bnxt_pci_driver);
14116 if (bnxt_pf_wq)
14117 destroy_workqueue(bnxt_pf_wq);
cabfb09d 14118 bnxt_debug_exit();
c213eae8
MC
14119}
14120
14121module_init(bnxt_init);
14122module_exit(bnxt_exit);