/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 *     msi = 2: choose from among all three options
 *     msi = 1: only consider MSI and INTx interrupts
 *     msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
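
/* Usage sketch (added for illustration, not part of the original source):
 * because "msi" is registered with mode 0644 it can be set at module load
 * time and inspected afterwards via sysfs, e.g.:
 *
 *	modprobe cxgb4 msi=1
 *	cat /sys/module/cxgb4/parameters/msi
 */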

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

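/* Worked example (added for clarity): an Ethernet header is 14 bytes, so a
 * frame DMA'd to a 4-byte aligned buffer puts the IP header at offset 14,
 * which is only 2-byte aligned.  Padding the buffer start by 2 moves the IP
 * header to offset 16, back onto a 4-byte boundary:
 *
 *	buf + 0:  2 bytes of padding
 *	buf + 2:  14-byte Ethernet header
 *	buf + 16: IP header (4-byte aligned)
 */
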
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 50000:
			s = "50Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

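/* Illustrative note (added): hash_mac_addr() folds a MAC address down to a
 * small bucket number, so the 64-bit "vec" above is a bitmap with one bit
 * per hash bucket.  For example, if two addresses hash to buckets 3 and 10,
 * the programmed vector is (1ULL << 3) | (1ULL << 10) = 0x408, and any frame
 * whose destination MAC hashes to either bucket passes the hash filter.
 */
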
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd &
					FW_PORT_CMD_DCBXDIS_F)
				   : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
					FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}

static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	return err;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	t4_free_sge_resources(adap);
	return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

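/* Usage sketch (added for illustration; the values are hypothetical): a
 * caller such as the ethtool coalescing path might request an interrupt
 * after ~5us or 8 packets, whichever comes first, on a queue's rspq:
 *
 *	err = cxgb4_set_rspq_intr_params(&q->rspq, 5, 8);
 *
 * Both parameters are snapped to the nearest entries in the adapter's
 * sge.timer_val[] and sge.counter_val[] tables by closest_timer() and
 * closest_thres() above.
 */
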
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}

	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

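/* Note (added for clarity): the release list above is an intrusive singly
 * linked list threaded through tid_tab[] itself, with the Tx channel stashed
 * in the low 2 bits of each pointer.  This works because tid_tab entries are
 * pointer-aligned, so those bits are otherwise always zero.
 * process_tid_release_list() below recovers both pieces, roughly:
 *
 *	unsigned int chan = (uintptr_t)p & 3;	// tag from the low bits
 *	p = (void *)p - chan;			// original aligned pointer
 */
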
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);

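/* Worked example (added; the table values are hypothetical): with an MTU
 * table beginning {88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, ...}, a
 * target of 1400 selects 1280 (the largest entry not exceeding 1400), while
 * a target of 64, smaller than every entry, selects the first entry, 88.
 */
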
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);

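/* Worked example (added; the numbers are hypothetical): with header_size =
 * 40 (IPv4 + TCP), data_size_align = 512 and a table containing ... 1064,
 * 1280 ..., the entry 1064 yields a data segment of 1024, which is 512-byte
 * aligned.  If the closest entry not exceeding max_mtu is 1280 (data segment
 * 1240, unaligned), the search still prefers 1064 because it lies within one
 * table slot of the closest match.
 */
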
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);

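/* Example (added for clarity): on T4/T5, where each of the 128 rows holds 2
 * entries, VI id 5 maps to SMT index (5 & 0x7f) << 1 = 10, the first entry
 * of row 5; on T6, with one entry per row, the same VI id maps directly to
 * index 5.
 */
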
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

1694static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
1695{
f061de42 1696 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
3069ee9b
VP
1697 __be64 indices;
1698 int ret;
1699
fc5ab020
HS
1700 spin_lock(&adap->win0_lock);
1701 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
1702 sizeof(indices), (__be32 *)&indices,
1703 T4_MEMORY_READ);
1704 spin_unlock(&adap->win0_lock);
3069ee9b 1705 if (!ret) {
404d9e3f
VP
1706 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
1707 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
1708 }
1709 return ret;
1710}
1711
1712int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
1713 u16 size)
1714{
1715 struct adapter *adap = netdev2adap(dev);
1716 u16 hw_pidx, hw_cidx;
1717 int ret;
1718
1719 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
1720 if (ret)
1721 goto out;
1722
1723 if (pidx != hw_pidx) {
1724 u16 delta;
f612b815 1725 u32 val;
3069ee9b
VP
1726
1727 if (pidx >= hw_pidx)
1728 delta = pidx - hw_pidx;
1729 else
1730 delta = size - hw_pidx + pidx;
f612b815
HS
1731
1732 if (is_t4(adap->params.chip))
1733 val = PIDX_V(delta);
1734 else
1735 val = PIDX_T5_V(delta);
3069ee9b 1736 wmb();
f612b815
HS
1737 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1738 QID_V(qid) | val);
3069ee9b
VP
1739 }
1740out:
1741 return ret;
1742}
1743EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
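/* Worked example (editorial, not from the original source): the delta
 * computation above handles producer-index wrap-around on the ring.  With a
 * queue of size 1024, a software pidx of 10 and a hardware pidx of 1020,
 * delta = 1024 - 1020 + 10 = 14, i.e. the 14 descriptors written across the
 * wrap are advertised to hardware with a single doorbell write.
 */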
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
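/* Worked example (editorial, values invented for illustration): on a card
 * with 256MB of EDC0 and 256MB of EDC1, an offset of 0x14000000 falls past
 * edc0_end (0x10000000) but below edc1_end (0x20000000), so the TPTE is read
 * from MEM_EDC1 at memaddr = offset - edc0_end = 0x04000000.
 */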
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
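/* Editorial note: the timestamp is assembled from two separate 32-bit
 * register reads, so the pair is not guaranteed to be sampled atomically.
 * If the low half can wrap between the two reads, a caller that needs a
 * coherent sample might (illustrative sketch) read the value twice and
 * retry when it appears to go backwards.
 */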
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
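/* Summary (editorial): doorbell-drop recovery differs by chip generation.
 * On T4 the driver quiesces every queue and replays lost producer-index
 * updates itself via recover_all_queues(); on T5 the hardware reports which
 * queue dropped a doorbell and by how much, so a single corrective write to
 * that queue's BAR2 doorbell register is sufficient.
 */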
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}

#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adap->mac_hlist);
	return err;

 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
 rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}

static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4_update_port_info(pi);
	if (err < 0)
		return err;

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	ret = t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
	cxgb4_dcb_reset(dev);
	dcb_tx_queue_prio_enable(dev, false);
#endif
	return ret;
}

int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
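/* Illustrative usage (editorial, argument values invented): a ULD that has
 * allocated a server TID can steer matching TCP SYNs to a chosen ingress
 * queue, e.g.
 *
 *	err = cxgb4_create_server_filter(dev, stid, sip, htons(80),
 *					 0, rxq_id, 0, 0);
 *
 * where sip and stid come from that ULD's own listen setup.
 */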
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently.
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = stats.rx_fcs_err;
	ns->rx_frame_errors = stats.rx_symbol_err;
	ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
			 stats.rx_ovflow2 + stats.rx_ovflow3 +
			 stats.rx_trunc0 + stats.rx_trunc1 +
			 stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		if (!is_t4(adapter->params.chip)) {
			switch (pi->tstamp_config.tx_type) {
			case HWTSTAMP_TX_OFF:
			case HWTSTAMP_TX_ON:
				break;
			default:
				return -ERANGE;
			}

			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L4);
				break;
			case HWTSTAMP_FILTER_PTP_V2_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L2_L4);
				break;
			case HWTSTAMP_FILTER_ALL:
			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}

			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
			    (pi->tstamp_config.rx_filter ==
			     HWTSTAMP_FILTER_NONE)) {
				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
					pi->ptp_enable = false;
			}

			if (pi->tstamp_config.rx_filter !=
			    HWTSTAMP_FILTER_NONE) {
				if (cxgb4_ptp_redirect_rx_packet(adapter,
								 pi) >= 0)
					pi->ptp_enable = true;
			}
		} else {
			/* For T4 Adapters */
			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_ALL:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}

/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	unsigned int i, vf, nvfs;
	u16 a, b;
	int err;
	u8 *na;

	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
							    PCI_CAP_ID_VPD);
	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (err)
		return;

	na = adap->params.vpd.na;
	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
			      hex2val(na[2 * i + 1]));

	a = (hw_addr[0] << 8) | hw_addr[1];
	b = (hw_addr[1] << 8) | hw_addr[2];
	a ^= b;
	a |= 0x0200;	/* locally assigned Ethernet MAC address */
	a &= ~0x0100;	/* not a multicast Ethernet MAC address */
	macaddr[0] = a >> 8;
	macaddr[1] = a & 0xff;

	for (i = 2; i < 5; i++)
		macaddr[i] = hw_addr[i + 1];

	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
	     vf < nvfs; vf++) {
		macaddr[5] = adap->pf * 16 + vf;
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
	}
}
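/* Worked example (editorial, values invented for illustration): for a VPD
 * base MAC of 00:07:43:12:34:56, a = 0x0007 and b = 0x0743, so a ^ b =
 * 0x0744; OR-ing in 0x0200 (locally administered) and clearing 0x0100
 * (multicast) yields 0x0644.  With PF 4, VF n is therefore assigned
 * 06:44:12:34:56:(0x40 + n), one address per VF.
 */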
static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}

static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
				    int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct vf_info *vfinfo;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	vfinfo = &adap->vfinfo[vf];

	ivi->vf = vf;
	ivi->max_tx_rate = vfinfo->tx_rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
	ivi->vlan = vfinfo->vlan;
	return 0;
}

static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}

static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
				  int min_tx_rate, int max_tx_rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int link_ok, speed, mtu;
	u32 fw_pfvf, fw_class;
	int class_id = vf;
	int ret;
	u16 pktsize;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	if (min_tx_rate) {
		dev_err(adap->pdev_dev,
			"Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
			min_tx_rate, vf);
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
	if (ret != FW_SUCCESS) {
		dev_err(adap->pdev_dev,
			"Failed to get link information for VF %d\n", vf);
		return -EINVAL;
	}

	if (!link_ok) {
		dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
		return -EINVAL;
	}

	if (max_tx_rate > speed) {
		dev_err(adap->pdev_dev,
			"Max tx rate %d for VF %d can't be > link-speed %u",
			max_tx_rate, vf, speed);
		return -EINVAL;
	}

	pktsize = mtu;
	/* subtract ethhdr size and 4 bytes crc since f/w appends it */
	pktsize = pktsize - sizeof(struct ethhdr) - 4;
	/* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
	pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
	/* configure Traffic Class for rate-limiting */
	ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
			      SCHED_CLASS_LEVEL_CL_RL,
			      SCHED_CLASS_MODE_CLASS,
			      SCHED_CLASS_RATEUNIT_BITS,
			      SCHED_CLASS_RATEMODE_ABS,
			      pi->tx_chan, class_id, 0,
			      max_tx_rate * 1000, 0, pktsize);
	if (ret) {
		dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
			ret);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev,
		 "Class %d with MSS %u configured with rate %u\n",
		 class_id, pktsize, max_tx_rate);

	/* bind VF to configured Traffic Class */
	fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
	fw_class = class_id;
	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
			    &fw_class);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Err %d in binding VF %d to Traffic Class %d\n",
			ret, vf, class_id);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
		 adap->pf, vf, class_id);
	adap->vfinfo[vf].tx_rate = max_tx_rate;
	return 0;
}
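/* Worked example (editorial): with a 1500-byte MTU, the scheduler pktsize
 * works out to 1500 - 14 (ethhdr) - 4 (CRC) - 20 (iphdr) - 20 (tcphdr) =
 * 1442, a typical IPv4 MSS, and a max_tx_rate of 1000 (the ndo_set_vf_rate
 * API passes Mbps) is programmed into the traffic class as 1000000 Kbps.
 */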
static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
				  u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
		return -EPROTONOSUPPORT;

	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
	if (!ret) {
		adap->vfinfo[vf].vlan = vlan;
		return 0;
	}

	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
	return ret;
}
#endif /* CONFIG_PCI_IOV */

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct ch_sched_params p;
	struct ch_sched_queue qe;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate * 1000;

	/* Max rate is 100 Gbps */
	if (req_rate > SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
			rate, SCHED_MAX_RATE_KBPS / 1000);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d fail. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!req_rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	memset(&p, 0, sizeof(p));
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel = pi->tx_chan;
	p.u.params.class = SCHED_CLS_NONE;
	p.u.params.minrate = 0;
	p.u.params.maxrate = req_rate;
	p.u.params.weight = 0;
	p.u.params.pktsize = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
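/* Usage note (editorial): ndo_set_tx_maxrate is normally exercised from
 * userspace through sysfs, e.g.
 *
 *	echo 500 > /sys/class/net/<iface>/queues/tx-0/tx_maxrate
 *
 * rate-limits Tx queue 0 to 500 Mbps via the scheduling-class alloc/bind
 * sequence above; writing 0 unbinds the queue again.
 */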
static int cxgb_setup_tc_flower(struct net_device *dev,
				struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return cxgb4_tc_flower_replace(dev, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return cxgb4_tc_flower_destroy(dev, cls_flower);
	case TC_CLSFLOWER_STATS:
		return cxgb4_tc_flower_stats(dev, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
{
	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return cxgb4_config_knode(dev, cls_u32);
	case TC_CLSU32_DELETE_KNODE:
		return cxgb4_delete_knode(dev, cls_u32);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSU32:
		return cxgb_setup_tc_cls_u32(dev, type_data);
	case TC_SETUP_CLSFLOWER:
		return cxgb_setup_tc_flower(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct port_info *pi = netdev2pinfo(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
					     pi, dev);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return cxgb_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
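/* Usage note (editorial): these hooks back the kernel's tc hardware offload
 * path, so a filter such as
 *
 *	tc qdisc add dev <iface> ingress
 *	tc filter add dev <iface> ingress flower skip_sw \
 *		dst_ip 10.0.0.1 action drop
 *
 * arrives here as a TC_SETUP_BLOCK bind followed by TC_CLSFLOWER_REPLACE
 * callbacks on the registered block.
 */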
static void cxgb_del_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int ret = 0, i;

	if (chip_ver < CHELSIO_T6)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!adapter->vxlan_port_cnt ||
		    adapter->vxlan_port != ti->port)
			return; /* Invalid VxLAN destination port */

		adapter->vxlan_port_cnt--;
		if (adapter->vxlan_port_cnt)
			return;

		adapter->vxlan_port = 0;
		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!adapter->geneve_port_cnt ||
		    adapter->geneve_port != ti->port)
			return; /* Invalid GENEVE destination port */

		adapter->geneve_port_cnt--;
		if (adapter->geneve_port_cnt)
			return;

		adapter->geneve_port = 0;
		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
		break;
	default:
		return;
	}

	/* Matchall mac entries can be deleted only after all tunnel ports
	 * are brought down or removed.
	 */
	if (!adapter->rawf_cnt)
		return;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		ret = t4_free_raw_mac_filt(adapter, pi->viid,
					   match_all_mac, match_all_mac,
					   adapter->rawf_start +
					   pi->port_id,
					   1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
				    i);
			return;
		}
		atomic_dec(&adapter->mps_encap[adapter->rawf_start +
			   pi->port_id].refcnt);
	}
}

static void cxgb_add_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int i, ret;

	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* Callback for adding vxlan port can be called with the same
		 * port for both IPv4 and IPv6. We should not disable the
		 * offloading when the same port for both protocols is added
		 * and later one of them is removed.
		 */
		if (adapter->vxlan_port_cnt &&
		    adapter->vxlan_port == ti->port) {
			adapter->vxlan_port_cnt++;
			return;
		}

		/* We will support only one VxLAN port */
		if (adapter->vxlan_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->vxlan_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->vxlan_port = ti->port;
		adapter->vxlan_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (adapter->geneve_port_cnt &&
		    adapter->geneve_port == ti->port) {
			adapter->geneve_port_cnt++;
			return;
		}

		/* We will support only one GENEVE port */
		if (adapter->geneve_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->geneve_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->geneve_port = ti->port;
		adapter->geneve_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
		break;
	default:
		return;
	}

	/* Create a 'match all' mac filter entry for inner mac,
	 * if raw mac interface is supported. Once the linux kernel provides
	 * driver entry points for adding/deleting the inner mac addresses,
	 * we will remove this 'match all' entry and fallback to adding
	 * exact match filters.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);

		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
					    match_all_mac,
					    match_all_mac,
					    adapter->rawf_start +
					    pi->port_id,
					    1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
				    be16_to_cpu(ti->port));
			cxgb_del_udp_tunnel(netdev, ti);
			return;
		}
		atomic_inc(&adapter->mps_encap[ret].refcnt);
	}
}
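/* Design note (editorial): only a single VXLAN and a single GENEVE UDP port
 * can be offloaded at a time; the per-type counters above reference-count
 * repeated adds of the same port (e.g. IPv4 and IPv6 sockets on one port)
 * so the offload survives until the last user removes it.  Note also that
 * each GENEVE case above must end in a break, not fall into the default
 * return, or the match-all inner-MAC filter handling below it is skipped.
 */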
static netdev_features_t cxgb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return features;

	/* Check if hw supports offload for this packet */
	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
		return features;

	/* Offload is not supported for this encapsulated packet */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* Disable GRO, if RX_CSUM is disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO;

	return features;
}

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
	.ndo_features_check   = cxgb_features_check,
	.ndo_fix_features     = cxgb_fix_features,
};

#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open             = cxgb4_mgmt_open,
	.ndo_set_vf_mac       = cxgb4_mgmt_set_vf_mac,
	.ndo_get_vf_config    = cxgb4_mgmt_get_vf_config,
	.ndo_set_vf_rate      = cxgb4_mgmt_set_vf_rate,
	.ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
	.ndo_set_vf_vlan      = cxgb4_mgmt_set_vf_vlan,
};
#endif

static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->version, cxgb4_driver_version,
		sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo = cxgb4_mgmt_get_drvinfo,
};

static void notify_fatal_err(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, fatal_err_notify_task);
	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
}

void t4_fatal_err(struct adapter *adap)
{
	int port;

	if (pci_channel_offline(adap->pdev))
		return;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter. RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
	queue_work(adap->workq, &adap->fatal_err_notify_task);
}

static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}

/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD	5

#define HMA_PAGE_SIZE		PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */

#define HMA_PAGE_ORDER					\
	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
	 ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE	1
#define HMA_MAX_TOTAL_SIZE				\
	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
	  HMA_MAX_NO_FW_ADDRESS) >> 20)
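/* Worked example (editorial): with 4KB pages, HMA_PAGE_ORDER evaluates to
 * ilog2(16384 / 4096) = 2, i.e. 16KB allocation units, and
 * HMA_MAX_TOTAL_SIZE to ((4096 << 2) * 16384) >> 20 = 256MB of host memory
 * addressable by the firmware.
 */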
static void adap_free_hma_mem(struct adapter *adapter)
{
	struct scatterlist *iter;
	struct page *page;
	int i;

	if (!adapter->hma.sgt)
		return;

	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
	}

	for_each_sg(adapter->hma.sgt->sgl, iter,
		    adapter->hma.sgt->orig_nents, i) {
		page = sg_page(iter);
		if (page)
			__free_pages(page, HMA_PAGE_ORDER);
	}

	kfree(adapter->hma.phy_addr);
	sg_free_table(adapter->hma.sgt);
	kfree(adapter->hma.sgt);
	adapter->hma.sgt = NULL;
}

static int adap_config_hma(struct adapter *adapter)
{
	struct scatterlist *sgl, *iter;
	struct sg_table *sgt;
	struct page *newpage;
	unsigned int i, j, k;
	u32 param, hma_size;
	unsigned int ncmds;
	size_t page_size;
	u32 page_order;
	int node, ret;

	/* HMA is supported only for T6+ cards.
	 * Avoid initializing HMA in kdump kernels.
	 */
	if (is_kdump_kernel() ||
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return 0;

	/* Get the HMA region size required by fw */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &param, &hma_size);
	/* An error means card has its own memory or HMA is not supported by
	 * the firmware. Return without any errors.
	 */
	if (ret || !hma_size)
		return 0;

	if (hma_size < HMA_MIN_TOTAL_SIZE ||
	    hma_size > HMA_MAX_TOTAL_SIZE) {
		dev_err(adapter->pdev_dev,
			"HMA size %uMB beyond bounds(%u-%lu)MB\n",
			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
		return -EINVAL;
	}

	page_size = HMA_PAGE_SIZE;
	page_order = HMA_PAGE_ORDER;
	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
	if (unlikely(!adapter->hma.sgt)) {
		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
		return -ENOMEM;
	}
	sgt = adapter->hma.sgt;
	/* FW returned value will be in MB's */
	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
		kfree(adapter->hma.sgt);
		adapter->hma.sgt = NULL;
		return -ENOMEM;
	}

	sgl = adapter->hma.sgt->sgl;
	node = dev_to_node(adapter->pdev_dev);
	for_each_sg(sgl, iter, sgt->orig_nents, i) {
		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
					   __GFP_ZERO, page_order);
		if (!newpage) {
			dev_err(adapter->pdev_dev,
				"Not enough memory for HMA page allocation\n");
			ret = -ENOMEM;
			goto free_hma;
		}
		sg_set_page(iter, newpage, page_size << page_order, 0);
	}

	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
				DMA_BIDIRECTIONAL);
	if (!sgt->nents) {
		dev_err(adapter->pdev_dev,
			"Not enough memory for HMA DMA mapping");
		ret = -ENOMEM;
		goto free_hma;
	}
	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;

	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
					GFP_KERNEL);
	if (unlikely(!adapter->hma.phy_addr)) {
		/* Without this, a failed allocation would fall through with a
		 * stale zero return code.
		 */
		ret = -ENOMEM;
		goto free_hma;
	}

	for_each_sg(sgl, iter, sgt->nents, i) {
		newpage = sg_page(iter);
		adapter->hma.phy_addr[i] = sg_dma_address(iter);
	}

	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
	/* Pass on the addresses to firmware */
	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
		struct fw_hma_cmd hma_cmd;
		u8 naddr = HMA_MAX_ADDR_IN_CMD;
		u8 soc = 0, eoc = 0;
		u8 hma_mode = 1; /* Presently we support only Page table mode */

		soc = (i == 0) ? 1 : 0;
		eoc = (i == ncmds - 1) ? 1 : 0;

		/* For last cmd, set naddr corresponding to remaining
		 * addresses
		 */
		if (i == ncmds - 1) {
			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
		}
		memset(&hma_cmd, 0, sizeof(hma_cmd));
		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));

		hma_cmd.mode_to_pcie_params =
			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));

		/* HMA cmd size specified in MB's */
		hma_cmd.naddr_size =
			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
			      FW_HMA_CMD_NADDR_V(naddr));

		/* Total Page size specified in units of 4K */
		hma_cmd.addr_size_pkd =
			htonl(FW_HMA_CMD_ADDR_SIZE_V
				((page_size << page_order) >> 12));

		/* Fill the 5 addresses */
		for (j = 0; j < naddr; j++) {
			hma_cmd.phy_address[j] =
				cpu_to_be64(adapter->hma.phy_addr[j + k]);
		}
		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
				 sizeof(hma_cmd), &hma_cmd);
		if (ret) {
			dev_err(adapter->pdev_dev,
				"HMA FW command failed with err %d\n", ret);
			goto free_hma;
		}
	}

	if (!ret)
		dev_info(adapter->pdev_dev,
			 "Reserved %uMB host memory for HMA\n", hma_size);
	return ret;

free_hma:
	adap_free_hma_mem(adapter);
	return ret;
}

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}

/*
 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc. Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization. But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
3627 */
3628static int adap_init0_tweaks(struct adapter *adapter)
3629{
3630 /*
3631 * Fix up various Host-Dependent Parameters like Page Size, Cache
3632 * Line Size, etc. The firmware default is for a 4KB Page Size and
3633 * 64B Cache Line Size ...
3634 */
3635 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3636
3637 /*
3638 * Process module parameters which affect early initialization.
3639 */
3640 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3641 dev_err(&adapter->pdev->dev,
3642 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3643 rx_dma_offset);
3644 rx_dma_offset = 2;
3645 }
f612b815
HS
3646 t4_set_reg_field(adapter, SGE_CONTROL_A,
3647 PKTSHIFT_V(PKTSHIFT_M),
3648 PKTSHIFT_V(rx_dma_offset));
636f9d37
VP
3649
3650 /*
3651 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3652 * adds the pseudo header itself.
3653 */
837e4a42
HS
3654 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3655 CSUM_HAS_PSEUDO_HDR_F, 0);
636f9d37
VP
3656
3657 return 0;
3658}
3659
01b69614
HS
3660/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
3661 * unto themselves and they contain their own firmware to perform their
3662 * tasks ...
3663 */
3664static int phy_aq1202_version(const u8 *phy_fw_data,
3665 size_t phy_fw_size)
3666{
3667 int offset;
3668
3669 /* At offset 0x8 you're looking for the primary image's
3670 * starting offset which is 3 Bytes wide
3671 *
3672 * At offset 0xa of the primary image, you look for the offset
3673 * of the DRAM segment which is 3 Bytes wide.
3674 *
3675 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
3676 * wide
3677 */
3678 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3679 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3680 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3681
3682 offset = le24(phy_fw_data + 0x8) << 12;
3683 offset = le24(phy_fw_data + offset + 0xa);
3684 return be16(phy_fw_data + offset + 0x27e);
3685
3686 #undef be16
3687 #undef le16
3688 #undef le24
3689}
3690
3691static struct info_10gbt_phy_fw {
3692 unsigned int phy_fw_id; /* PCI Device ID */
3693 char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
3694 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3695 int phy_flash; /* Has FLASH for PHY Firmware */
3696} phy_info_array[] = {
3697 {
3698 PHY_AQ1202_DEVICEID,
3699 PHY_AQ1202_FIRMWARE,
3700 phy_aq1202_version,
3701 1,
3702 },
3703 {
3704 PHY_BCM84834_DEVICEID,
3705 PHY_BCM84834_FIRMWARE,
3706 NULL,
3707 0,
3708 },
3709 { 0, NULL, NULL },
3710};
3711
3712static struct info_10gbt_phy_fw *find_phy_info(int devid)
3713{
3714 int i;
3715
3716 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3717 if (phy_info_array[i].phy_fw_id == devid)
3718 return &phy_info_array[i];
3719 }
3720 return NULL;
3721}
3722
3723/* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
3724 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
3725 * we return a negative error number. If we transfer new firmware we return 1
3726 * (from t4_load_phy_fw()). If we don't do anything we return 0.
3727 */
3728static int adap_init0_phy(struct adapter *adap)
3729{
3730 const struct firmware *phyf;
3731 int ret;
3732 struct info_10gbt_phy_fw *phy_info;
3733
3734 /* Use the device ID to determine which PHY file to flash.
3735 */
3736 phy_info = find_phy_info(adap->pdev->device);
3737 if (!phy_info) {
3738 dev_warn(adap->pdev_dev,
3739 "No PHY Firmware file found for this PHY\n");
3740 return -EOPNOTSUPP;
3741 }
3742
3743 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3744 * use that. The adapter firmware provides us with a memory buffer
3745 * where we can load a PHY firmware file from the host if we want to
3746 * override the PHY firmware File in flash.
3747 */
3748 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
3749 adap->pdev_dev);
3750 if (ret < 0) {
3751 /* For adapters without FLASH attached to PHY for their
3752 * firmware, it's obviously a fatal error if we can't get the
3753 * firmware to the adapter. For adapters with PHY firmware
3754 * FLASH storage, it's worth a warning if we can't find the
3755 * PHY Firmware but we'll neuter the error ...
3756 */
3757 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3758 "/lib/firmware/%s, error %d\n",
3759 phy_info->phy_fw_file, -ret);
3760 if (phy_info->phy_flash) {
3761 int cur_phy_fw_ver = 0;
3762
3763 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3764 dev_warn(adap->pdev_dev, "continuing with, on-adapter "
3765 "FLASH copy, version %#x\n", cur_phy_fw_ver);
3766 ret = 0;
3767 }
3768
3769 return ret;
3770 }
3771
3772 /* Load PHY Firmware onto adapter.
3773 */
3774 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3775 phy_info->phy_fw_version,
3776 (u8 *)phyf->data, phyf->size);
3777 if (ret < 0)
3778 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3779 -ret);
3780 else if (ret > 0) {
3781 int new_phy_fw_ver = 0;
3782
3783 if (phy_info->phy_fw_version)
3784 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3785 phyf->size);
3786 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3787 "Firmware /lib/firmware/%s, version %#x\n",
3788 phy_info->phy_fw_file, new_phy_fw_ver);
3789 }
3790
3791 release_firmware(phyf);
3792
3793 return ret;
3794}
3795
636f9d37
VP
3796/*
3797 * Attempt to initialize the adapter via a Firmware Configuration File.
3798 */
3799static int adap_init0_config(struct adapter *adapter, int reset)
3800{
3801 struct fw_caps_config_cmd caps_cmd;
3802 const struct firmware *cf;
3803 unsigned long mtype = 0, maddr = 0;
3804 u32 finiver, finicsum, cfcsum;
16e47624
HS
3805 int ret;
3806 int config_issued = 0;
0a57a536 3807 char *fw_config_file, fw_config_file_path[256];
16e47624 3808 char *config_name = NULL;
636f9d37
VP
3809
3810 /*
3811 * Reset device if necessary.
3812 */
3813 if (reset) {
3814 ret = t4_fw_reset(adapter, adapter->mbox,
0d804338 3815 PIORSTMODE_F | PIORST_F);
636f9d37
VP
3816 if (ret < 0)
3817 goto bye;
3818 }
3819
01b69614
HS
3820 /* If this is a 10Gb/s-BT adapter make sure the chip-external
3821 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
3822 * to be performed after any global adapter RESET above since some
3823 * PHYs only have local RAM copies of the PHY firmware.
3824 */
3825 if (is_10gbt_device(adapter->pdev->device)) {
3826 ret = adap_init0_phy(adapter);
3827 if (ret < 0)
3828 goto bye;
3829 }
636f9d37
VP
3830 /*
3831 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3832 * then use that. Otherwise, use the configuration file stored
3833 * in the adapter flash ...
3834 */
d14807dd 3835 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
0a57a536 3836 case CHELSIO_T4:
16e47624 3837 fw_config_file = FW4_CFNAME;
0a57a536
SR
3838 break;
3839 case CHELSIO_T5:
3840 fw_config_file = FW5_CFNAME;
3841 break;
3ccc6cf7
HS
3842 case CHELSIO_T6:
3843 fw_config_file = FW6_CFNAME;
3844 break;
0a57a536
SR
3845 default:
3846 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3847 adapter->pdev->device);
3848 ret = -EINVAL;
3849 goto bye;
3850 }
3851
3852 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
636f9d37 3853 if (ret < 0) {
16e47624 3854 config_name = "On FLASH";
636f9d37
VP
3855 mtype = FW_MEMTYPE_CF_FLASH;
3856 maddr = t4_flash_cfg_addr(adapter);
3857 } else {
3858 u32 params[7], val[7];
3859
16e47624
HS
3860 sprintf(fw_config_file_path,
3861 "/lib/firmware/%s", fw_config_file);
3862 config_name = fw_config_file_path;
3863
636f9d37
VP
3864 if (cf->size >= FLASH_CFG_MAX_SIZE)
3865 ret = -ENOMEM;
3866 else {
5167865a
HS
3867 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3868 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
636f9d37 3869 ret = t4_query_params(adapter, adapter->mbox,
b2612722 3870 adapter->pf, 0, 1, params, val);
636f9d37
VP
3871 if (ret == 0) {
3872 /*
fc5ab020 3873 * For t4_memory_rw() below addresses and
636f9d37
VP
3874 * sizes have to be in terms of multiples of 4
3875 * bytes. So, if the Configuration File isn't
3876 * a multiple of 4 bytes in length we'll have
3877 * to write that out separately since we can't
3878 * guarantee that the bytes following the
3879 * residual byte in the buffer returned by
3880 * request_firmware() are zeroed out ...
3881 */
3882 size_t resid = cf->size & 0x3;
3883 size_t size = cf->size & ~0x3;
3884 __be32 *data = (__be32 *)cf->data;
3885
5167865a
HS
3886 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3887 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
636f9d37 3888
fc5ab020
HS
3889 spin_lock(&adapter->win0_lock);
3890 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3891 size, data, T4_MEMORY_WRITE);
636f9d37
VP
3892 if (ret == 0 && resid != 0) {
3893 union {
3894 __be32 word;
3895 char buf[4];
3896 } last;
3897 int i;
3898
3899 last.word = data[size >> 2];
3900 for (i = resid; i < 4; i++)
3901 last.buf[i] = 0;
fc5ab020
HS
3902 ret = t4_memory_rw(adapter, 0, mtype,
3903 maddr + size,
3904 4, &last.word,
3905 T4_MEMORY_WRITE);
636f9d37 3906 }
fc5ab020 3907 spin_unlock(&adapter->win0_lock);
636f9d37
VP
3908 }
3909 }
3910
3911 release_firmware(cf);
3912 if (ret)
3913 goto bye;
3914 }
3915
3916 /*
3917 * Issue a Capability Configuration command to the firmware to get it
3918 * to parse the Configuration File. We don't use t4_fw_config_file()
3919 * because we want the ability to modify various features after we've
3920 * processed the configuration file ...
3921 */
3922 memset(&caps_cmd, 0, sizeof(caps_cmd));
3923 caps_cmd.op_to_write =
e2ac9628
HS
3924 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3925 FW_CMD_REQUEST_F |
3926 FW_CMD_READ_F);
ce91a923 3927 caps_cmd.cfvalid_to_len16 =
5167865a
HS
3928 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3929 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3930 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
636f9d37
VP
3931 FW_LEN16(caps_cmd));
3932 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3933 &caps_cmd);
16e47624
HS
3934
3935 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3936 * Configuration File in FLASH), our last gasp effort is to use the
3937 * Firmware Configuration File which is embedded in the firmware. A
3938 * very few early versions of the firmware didn't have one embedded
3939 * but we can ignore those.
3940 */
3941 if (ret == -ENOENT) {
3942 memset(&caps_cmd, 0, sizeof(caps_cmd));
3943 caps_cmd.op_to_write =
e2ac9628
HS
3944 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3945 FW_CMD_REQUEST_F |
3946 FW_CMD_READ_F);
16e47624
HS
3947 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3948 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3949 sizeof(caps_cmd), &caps_cmd);
3950 config_name = "Firmware Default";
3951 }
3952
3953 config_issued = 1;
636f9d37
VP
3954 if (ret < 0)
3955 goto bye;
3956
3957 finiver = ntohl(caps_cmd.finiver);
3958 finicsum = ntohl(caps_cmd.finicsum);
3959 cfcsum = ntohl(caps_cmd.cfcsum);
3960 if (finicsum != cfcsum)
3961 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3962 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3963 finicsum, cfcsum);
3964
636f9d37
VP
3965 /*
3966 * And now tell the firmware to use the configuration we just loaded.
3967 */
3968 caps_cmd.op_to_write =
e2ac9628
HS
3969 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3970 FW_CMD_REQUEST_F |
3971 FW_CMD_WRITE_F);
ce91a923 3972 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
3973 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3974 NULL);
3975 if (ret < 0)
3976 goto bye;
3977
3978 /*
3979 * Tweak configuration based on system architecture, module
3980 * parameters, etc.
3981 */
3982 ret = adap_init0_tweaks(adapter);
3983 if (ret < 0)
3984 goto bye;
3985
8b4e6b3c
AV
3986 /* We will proceed even if HMA init fails. */
3987 ret = adap_config_hma(adapter);
3988 if (ret)
3989 dev_err(adapter->pdev_dev,
3990 "HMA configuration failed with error %d\n", ret);
3991
636f9d37
VP
3992 /*
3993 * And finally tell the firmware to initialize itself using the
3994 * parameters from the Configuration File.
3995 */
3996 ret = t4_fw_initialize(adapter, adapter->mbox);
3997 if (ret < 0)
3998 goto bye;
3999
06640310
HS
4000 /* Emit Firmware Configuration File information and return
4001 * successfully.
636f9d37 4002 */
636f9d37 4003 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
16e47624
HS
4004 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4005 config_name, finiver, cfcsum);
636f9d37
VP
4006 return 0;
4007
4008 /*
4009 * Something bad happened. Return the error ... (If the "error"
4010 * is that there's no Configuration File on the adapter we don't
4011 * want to issue a warning since this is fairly common.)
4012 */
4013bye:
16e47624
HS
4014 if (config_issued && ret != -ENOENT)
4015 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4016 config_name, -ret);
636f9d37
VP
4017 return ret;
4018}
4019
16e47624
HS
4020static struct fw_info fw_info_array[] = {
4021 {
4022 .chip = CHELSIO_T4,
4023 .fs_name = FW4_CFNAME,
4024 .fw_mod_name = FW4_FNAME,
4025 .fw_hdr = {
4026 .chip = FW_HDR_CHIP_T4,
4027 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4028 .intfver_nic = FW_INTFVER(T4, NIC),
4029 .intfver_vnic = FW_INTFVER(T4, VNIC),
4030 .intfver_ri = FW_INTFVER(T4, RI),
4031 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4032 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4033 },
4034 }, {
4035 .chip = CHELSIO_T5,
4036 .fs_name = FW5_CFNAME,
4037 .fw_mod_name = FW5_FNAME,
4038 .fw_hdr = {
4039 .chip = FW_HDR_CHIP_T5,
4040 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4041 .intfver_nic = FW_INTFVER(T5, NIC),
4042 .intfver_vnic = FW_INTFVER(T5, VNIC),
4043 .intfver_ri = FW_INTFVER(T5, RI),
4044 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4045 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4046 },
3ccc6cf7
HS
4047 }, {
4048 .chip = CHELSIO_T6,
4049 .fs_name = FW6_CFNAME,
4050 .fw_mod_name = FW6_FNAME,
4051 .fw_hdr = {
4052 .chip = FW_HDR_CHIP_T6,
4053 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4054 .intfver_nic = FW_INTFVER(T6, NIC),
4055 .intfver_vnic = FW_INTFVER(T6, VNIC),
4056 .intfver_ofld = FW_INTFVER(T6, OFLD),
4057 .intfver_ri = FW_INTFVER(T6, RI),
4058 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4059 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4060 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4061 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4062 },
16e47624 4063 }
3ccc6cf7 4064
16e47624
HS
4065};
4066
4067static struct fw_info *find_fw_info(int chip)
4068{
4069 int i;
4070
4071 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4072 if (fw_info_array[i].chip == chip)
4073 return &fw_info_array[i];
4074 }
4075 return NULL;
4076}
4077
b8ff05a9
DM
4078/*
4079 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4080 */
4081static int adap_init0(struct adapter *adap)
4082{
4083 int ret;
4084 u32 v, port_vec;
4085 enum dev_state state;
4086 u32 params[7], val[7];
9a4da2cd 4087 struct fw_caps_config_cmd caps_cmd;
dcf7b6f5 4088 int reset = 1;
b8ff05a9 4089
ae469b68
HS
4090 /* Grab Firmware Device Log parameters as early as possible so we have
4091 * access to it for debugging, etc.
4092 */
4093 ret = t4_init_devlog_params(adap);
4094 if (ret < 0)
4095 return ret;
4096
666224d4 4097 /* Contact FW, advertising Master capability */
c5a8c0f3
HS
4098 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4099 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
b8ff05a9
DM
4100 if (ret < 0) {
4101 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4102 ret);
4103 return ret;
4104 }
636f9d37
VP
4105 if (ret == adap->mbox)
4106 adap->flags |= MASTER_PF;
b8ff05a9 4107
636f9d37
VP
4108 /*
4109 * If we're the Master PF Driver and the device is uninitialized,
4110 * then let's consider upgrading the firmware ... (We always want
4111 * to check the firmware version number in order to A. get it for
4112 * later reporting and B. to warn if the currently loaded firmware
4113 * is excessively mismatched relative to the driver.)
4114 */
0de72738 4115
760446f9 4116 t4_get_version_info(adap);
a69265e9
HS
4117 ret = t4_check_fw_version(adap);
4118 /* If firmware is too old (not supported by driver) force an update. */
21d11bd6 4119 if (ret)
a69265e9 4120 state = DEV_STATE_UNINIT;
636f9d37 4121 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
16e47624
HS
4122 struct fw_info *fw_info;
4123 struct fw_hdr *card_fw;
4124 const struct firmware *fw;
4125 const u8 *fw_data = NULL;
4126 unsigned int fw_size = 0;
4127
4128 /* This is the firmware whose headers the driver was compiled
4129 * against
4130 */
4131 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4132 if (fw_info == NULL) {
4133 dev_err(adap->pdev_dev,
4134 "unable to get firmware info for chip %d.\n",
4135 CHELSIO_CHIP_VERSION(adap->params.chip));
4136 return -EINVAL;
636f9d37 4137 }
16e47624
HS
4138
4139 /* allocate memory to read the header of the firmware on the
4140 * card
4141 */
752ade68 4142 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
d624613e
Y
4143 if (!card_fw) {
4144 ret = -ENOMEM;
4145 goto bye;
4146 }
16e47624
HS
4147
4148 /* Get FW from from /lib/firmware/ */
4149 ret = request_firmware(&fw, fw_info->fw_mod_name,
4150 adap->pdev_dev);
4151 if (ret < 0) {
4152 dev_err(adap->pdev_dev,
4153 "unable to load firmware image %s, error %d\n",
4154 fw_info->fw_mod_name, ret);
4155 } else {
4156 fw_data = fw->data;
4157 fw_size = fw->size;
4158 }
4159
4160 /* upgrade FW logic */
4161 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4162 state, &reset);
4163
4164 /* Cleaning up */
0b5b6bee 4165 release_firmware(fw);
752ade68 4166 kvfree(card_fw);
16e47624 4167
636f9d37 4168 if (ret < 0)
16e47624 4169 goto bye;
636f9d37 4170 }
b8ff05a9 4171
636f9d37
VP
4172 /*
4173 * Grab VPD parameters. This should be done after we establish a
4174 * connection to the firmware since some of the VPD parameters
4175 * (notably the Core Clock frequency) are retrieved via requests to
4176 * the firmware. On the other hand, we need these fairly early on
4177 * so we do this right after getting ahold of the firmware.
4178 */
098ef6c2 4179 ret = t4_get_vpd_params(adap, &adap->params.vpd);
a0881cab
DM
4180 if (ret < 0)
4181 goto bye;
a0881cab 4182
636f9d37 4183 /*
13ee15d3
VP
4184 * Find out what ports are available to us. Note that we need to do
4185 * this before calling adap_init0_no_config() since it needs nports
4186 * and portvec ...
636f9d37
VP
4187 */
4188 v =
5167865a
HS
4189 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4190 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
b2612722 4191 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
a0881cab
DM
4192 if (ret < 0)
4193 goto bye;
4194
636f9d37
VP
4195 adap->params.nports = hweight32(port_vec);
4196 adap->params.portvec = port_vec;
4197
06640310
HS
4198 /* If the firmware is initialized already, emit a simply note to that
4199 * effect. Otherwise, it's time to try initializing the adapter.
636f9d37
VP
4200 */
4201 if (state == DEV_STATE_INIT) {
8b4e6b3c
AV
4202 ret = adap_config_hma(adap);
4203 if (ret)
4204 dev_err(adap->pdev_dev,
4205 "HMA configuration failed with error %d\n",
4206 ret);
636f9d37
VP
4207 dev_info(adap->pdev_dev, "Coming up as %s: "\
4208 "Adapter already initialized\n",
4209 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
636f9d37
VP
4210 } else {
4211 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4212 "Initializing adapter\n");
06640310
HS
4213
4214 /* Find out whether we're dealing with a version of the
4215 * firmware which has configuration file support.
636f9d37 4216 */
06640310
HS
4217 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4218 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
b2612722 4219 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
06640310 4220 params, val);
13ee15d3 4221
06640310
HS
4222 /* If the firmware doesn't support Configuration Files,
4223 * return an error.
4224 */
4225 if (ret < 0) {
4226 dev_err(adap->pdev_dev, "firmware doesn't support "
4227 "Firmware Configuration Files\n");
4228 goto bye;
4229 }
4230
4231 /* The firmware provides us with a memory buffer where we can
4232 * load a Configuration File from the host if we want to
4233 * override the Configuration File in flash.
4234 */
4235 ret = adap_init0_config(adap, reset);
4236 if (ret == -ENOENT) {
4237 dev_err(adap->pdev_dev, "no Configuration File "
4238 "present on adapter.\n");
4239 goto bye;
636f9d37
VP
4240 }
4241 if (ret < 0) {
06640310
HS
4242 dev_err(adap->pdev_dev, "could not initialize "
4243 "adapter, error %d\n", -ret);
636f9d37
VP
4244 goto bye;
4245 }
4246 }
4247
06640310
HS
4248 /* Give the SGE code a chance to pull in anything that it needs ...
4249 * Note that this must be called after we retrieve our VPD parameters
4250 * in order to know how to convert core ticks to seconds, etc.
636f9d37 4251 */
06640310
HS
4252 ret = t4_sge_init(adap);
4253 if (ret < 0)
4254 goto bye;
636f9d37 4255
9a4da2cd
VP
4256 if (is_bypass_device(adap->pdev->device))
4257 adap->params.bypass = 1;
4258
636f9d37
VP
4259 /*
4260 * Grab some of our basic fundamental operating parameters.
4261 */
4262#define FW_PARAM_DEV(param) \
5167865a
HS
4263 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
4264 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
636f9d37 4265
b8ff05a9 4266#define FW_PARAM_PFVF(param) \
5167865a
HS
4267 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
4268 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
4269 FW_PARAMS_PARAM_Y_V(0) | \
4270 FW_PARAMS_PARAM_Z_V(0)
b8ff05a9 4271
636f9d37 4272 params[0] = FW_PARAM_PFVF(EQ_START);
b8ff05a9
DM
4273 params[1] = FW_PARAM_PFVF(L2T_START);
4274 params[2] = FW_PARAM_PFVF(L2T_END);
4275 params[3] = FW_PARAM_PFVF(FILTER_START);
4276 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 4277 params[5] = FW_PARAM_PFVF(IQFLINT_START);
b2612722 4278 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
b8ff05a9
DM
4279 if (ret < 0)
4280 goto bye;
636f9d37
VP
4281 adap->sge.egr_start = val[0];
4282 adap->l2t_start = val[1];
4283 adap->l2t_end = val[2];
b8ff05a9
DM
4284 adap->tids.ftid_base = val[3];
4285 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 4286 adap->sge.ingr_start = val[5];
b8ff05a9 4287
0e249898
AV
4288 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4289 /* Read the raw mps entries. In T6, the last 2 tcam entries
4290 * are reserved for raw mac addresses (rawf = 2, one per port).
4291 */
4292 params[0] = FW_PARAM_PFVF(RAWF_START);
4293 params[1] = FW_PARAM_PFVF(RAWF_END);
4294 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4295 params, val);
4296 if (ret == 0) {
4297 adap->rawf_start = val[0];
4298 adap->rawf_cnt = val[1] - val[0] + 1;
4299 }
4300 }
4301
4b8e27a8
HS
4302 /* qids (ingress/egress) returned from firmware can be anywhere
4303 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
4304 * Hence driver needs to allocate memory for this range to
4305 * store the queue info. Get the highest IQFLINT/EQ index returned
4306 * in FW_EQ_*_CMD.alloc command.
4307 */
4308 params[0] = FW_PARAM_PFVF(EQ_END);
4309 params[1] = FW_PARAM_PFVF(IQFLINT_END);
b2612722 4310 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4b8e27a8
HS
4311 if (ret < 0)
4312 goto bye;
4313 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
4314 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
4315
4316 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
4317 sizeof(*adap->sge.egr_map), GFP_KERNEL);
4318 if (!adap->sge.egr_map) {
4319 ret = -ENOMEM;
4320 goto bye;
4321 }
4322
4323 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
4324 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
4325 if (!adap->sge.ingr_map) {
4326 ret = -ENOMEM;
4327 goto bye;
4328 }
4329
4330 /* Allocate the memory for the vaious egress queue bitmaps
5b377d11 4331 * ie starving_fl, txq_maperr and blocked_fl.
4b8e27a8
HS
4332 */
4333 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4334 sizeof(long), GFP_KERNEL);
4335 if (!adap->sge.starving_fl) {
4336 ret = -ENOMEM;
4337 goto bye;
4338 }
4339
4340 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4341 sizeof(long), GFP_KERNEL);
4342 if (!adap->sge.txq_maperr) {
4343 ret = -ENOMEM;
4344 goto bye;
4345 }
4346
5b377d11
HS
4347#ifdef CONFIG_DEBUG_FS
4348 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4349 sizeof(long), GFP_KERNEL);
4350 if (!adap->sge.blocked_fl) {
4351 ret = -ENOMEM;
4352 goto bye;
4353 }
4354#endif
4355
b5a02f50
AB
4356 params[0] = FW_PARAM_PFVF(CLIP_START);
4357 params[1] = FW_PARAM_PFVF(CLIP_END);
b2612722 4358 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
b5a02f50
AB
4359 if (ret < 0)
4360 goto bye;
4361 adap->clipt_start = val[0];
4362 adap->clipt_end = val[1];
4363
b72a32da
RL
4364 /* We don't yet have a PARAMs calls to retrieve the number of Traffic
4365 * Classes supported by the hardware/firmware so we hard code it here
4366 * for now.
4367 */
4368 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
4369
636f9d37
VP
4370 /* query params related to active filter region */
4371 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4372 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
b2612722 4373 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
636f9d37
VP
4374 /* If Active filter size is set we enable establishing
4375 * offload connection through firmware work request
4376 */
4377 if ((val[0] != val[1]) && (ret >= 0)) {
4378 adap->flags |= FW_OFLD_CONN;
4379 adap->tids.aftid_base = val[0];
4380 adap->tids.aftid_end = val[1];
4381 }
4382
b407a4a9
VP
4383 /* If we're running on newer firmware, let it know that we're
4384 * prepared to deal with encapsulated CPL messages. Older
4385 * firmware won't understand this and we'll just get
4386 * unencapsulated messages ...
4387 */
4388 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4389 val[0] = 1;
b2612722 4390 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
b407a4a9 4391
1ac0f095
KS
4392 /*
4393 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
4394 * capability. Earlier versions of the firmware didn't have the
4395 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
4396 * permission to use ULPTX MEMWRITE DSGL.
4397 */
4398 if (is_t4(adap->params.chip)) {
4399 adap->params.ulptx_memwrite_dsgl = false;
4400 } else {
4401 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
b2612722 4402 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1ac0f095
KS
4403 1, params, val);
4404 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
4405 }
4406
086de575
SW
4407 /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
4408 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
4409 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4410 1, params, val);
4411 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
4412
0ff90994
KS
4413 /* See if FW supports FW_FILTER2 work request */
4414 if (is_t4(adap->params.chip)) {
4415 adap->params.filter2_wr_support = 0;
4416 } else {
4417 params[0] = FW_PARAM_DEV(FILTER2_WR);
4418 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4419 1, params, val);
4420 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
4421 }
4422
636f9d37
VP
4423 /*
4424 * Get device capabilities so we can determine what resources we need
4425 * to manage.
4426 */
4427 memset(&caps_cmd, 0, sizeof(caps_cmd));
e2ac9628
HS
4428 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4429 FW_CMD_REQUEST_F | FW_CMD_READ_F);
ce91a923 4430 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
4431 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4432 &caps_cmd);
4433 if (ret < 0)
4434 goto bye;
4435
5c31254e
KS
4436 if (caps_cmd.ofldcaps ||
4437 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
b8ff05a9
DM
4438 /* query offload-related parameters */
4439 params[0] = FW_PARAM_DEV(NTID);
4440 params[1] = FW_PARAM_PFVF(SERVER_START);
4441 params[2] = FW_PARAM_PFVF(SERVER_END);
4442 params[3] = FW_PARAM_PFVF(TDDP_START);
4443 params[4] = FW_PARAM_PFVF(TDDP_END);
4444 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
b2612722 4445 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
636f9d37 4446 params, val);
b8ff05a9
DM
4447 if (ret < 0)
4448 goto bye;
4449 adap->tids.ntids = val[0];
4450 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4451 adap->tids.stid_base = val[1];
4452 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37 4453 /*
dbedd44e 4454 * Setup server filter region. Divide the available filter
636f9d37
VP
4455 * region into two parts. Regular filters get 1/3rd and server
4456 * filters get 2/3rd part. This is only enabled if workarond
4457 * path is enabled.
4458 * 1. For regular filters.
4459 * 2. Server filter: This are special filters which are used
4460 * to redirect SYN packets to offload queue.
4461 */
4462 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4463 adap->tids.sftid_base = adap->tids.ftid_base +
4464 DIV_ROUND_UP(adap->tids.nftids, 3);
4465 adap->tids.nsftids = adap->tids.nftids -
4466 DIV_ROUND_UP(adap->tids.nftids, 3);
4467 adap->tids.nftids = adap->tids.sftid_base -
4468 adap->tids.ftid_base;
4469 }
b8ff05a9
DM
4470 adap->vres.ddp.start = val[3];
4471 adap->vres.ddp.size = val[4] - val[3] + 1;
4472 adap->params.ofldq_wr_cred = val[5];
636f9d37 4473
5c31254e 4474 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
004c3cf1
WY
4475 ret = init_hash_filter(adap);
4476 if (ret < 0)
5c31254e
KS
4477 goto bye;
4478 } else {
4479 adap->params.offload = 1;
4480 adap->num_ofld_uld += 1;
4481 }
b8ff05a9 4482 }
636f9d37 4483 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
4484 params[0] = FW_PARAM_PFVF(STAG_START);
4485 params[1] = FW_PARAM_PFVF(STAG_END);
4486 params[2] = FW_PARAM_PFVF(RQ_START);
4487 params[3] = FW_PARAM_PFVF(RQ_END);
4488 params[4] = FW_PARAM_PFVF(PBL_START);
4489 params[5] = FW_PARAM_PFVF(PBL_END);
b2612722 4490 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
636f9d37 4491 params, val);
b8ff05a9
DM
4492 if (ret < 0)
4493 goto bye;
4494 adap->vres.stag.start = val[0];
4495 adap->vres.stag.size = val[1] - val[0] + 1;
4496 adap->vres.rq.start = val[2];
4497 adap->vres.rq.size = val[3] - val[2] + 1;
4498 adap->vres.pbl.start = val[4];
4499 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab 4500
c68644ef
RR
4501 params[0] = FW_PARAM_PFVF(SRQ_START);
4502 params[1] = FW_PARAM_PFVF(SRQ_END);
4503 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4504 params, val);
4505 if (!ret) {
4506 adap->vres.srq.start = val[0];
4507 adap->vres.srq.size = val[1] - val[0] + 1;
4508 }
4509 if (adap->vres.srq.size) {
4510 adap->srq = t4_init_srq(adap->vres.srq.size);
4511 if (!adap->srq)
4512 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
4513 }
4514
a0881cab
DM
4515 params[0] = FW_PARAM_PFVF(SQRQ_START);
4516 params[1] = FW_PARAM_PFVF(SQRQ_END);
4517 params[2] = FW_PARAM_PFVF(CQ_START);
4518 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
4519 params[4] = FW_PARAM_PFVF(OCQ_START);
4520 params[5] = FW_PARAM_PFVF(OCQ_END);
b2612722 4521 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5c937dd3 4522 val);
a0881cab
DM
4523 if (ret < 0)
4524 goto bye;
4525 adap->vres.qp.start = val[0];
4526 adap->vres.qp.size = val[1] - val[0] + 1;
4527 adap->vres.cq.start = val[2];
4528 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
4529 adap->vres.ocq.start = val[4];
4530 adap->vres.ocq.size = val[5] - val[4] + 1;
4c2c5763
HS
4531
4532 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
4533 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
b2612722 4534 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5c937dd3 4535 val);
4c2c5763
HS
4536 if (ret < 0) {
4537 adap->params.max_ordird_qp = 8;
4538 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
4539 ret = 0;
4540 } else {
4541 adap->params.max_ordird_qp = val[0];
4542 adap->params.max_ird_adapter = val[1];
4543 }
4544 dev_info(adap->pdev_dev,
4545 "max_ordird_qp %d max_ird_adapter %d\n",
4546 adap->params.max_ordird_qp,
4547 adap->params.max_ird_adapter);
43db9296
RR
4548
4549 /* Enable write_with_immediate if FW supports it */
4550 params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
4551 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
4552 val);
4553 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
f3910c62
RR
4554
4555 /* Enable write_cmpl if FW supports it */
4556 params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
4557 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
4558 val);
4559 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
0fbc81b3 4560 adap->num_ofld_uld += 2;
b8ff05a9 4561 }
636f9d37 4562 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
4563 params[0] = FW_PARAM_PFVF(ISCSI_START);
4564 params[1] = FW_PARAM_PFVF(ISCSI_END);
b2612722 4565 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
636f9d37 4566 params, val);
b8ff05a9
DM
4567 if (ret < 0)
4568 goto bye;
4569 adap->vres.iscsi.start = val[0];
4570 adap->vres.iscsi.size = val[1] - val[0] + 1;
0fbc81b3
HS
4571 /* LIO target and cxgb4i initiaitor */
4572 adap->num_ofld_uld += 2;
b8ff05a9 4573 }
94cdb8bb 4574 if (caps_cmd.cryptocaps) {
e383f248
AG
4575 if (ntohs(caps_cmd.cryptocaps) &
4576 FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
4577 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
4578 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4579 2, params, val);
4580 if (ret < 0) {
4581 if (ret != -EINVAL)
4582 goto bye;
4583 } else {
4584 adap->vres.ncrypto_fc = val[0];
4585 }
4586 adap->num_ofld_uld += 1;
4587 }
4588 if (ntohs(caps_cmd.cryptocaps) &
4589 FW_CAPS_CONFIG_TLS_INLINE) {
4590 params[0] = FW_PARAM_PFVF(TLS_START);
4591 params[1] = FW_PARAM_PFVF(TLS_END);
4592 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4593 2, params, val);
4594 if (ret < 0)
72a56ca9 4595 goto bye;
e383f248
AG
4596 adap->vres.key.start = val[0];
4597 adap->vres.key.size = val[1] - val[0] + 1;
4598 adap->num_uld += 1;
72a56ca9 4599 }
a6ec572b 4600 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
94cdb8bb 4601 }
b8ff05a9
DM
4602#undef FW_PARAM_PFVF
4603#undef FW_PARAM_DEV
4604
92e7ae71
HS
4605 /* The MTU/MSS Table is initialized by now, so load their values. If
4606 * we're initializing the adapter, then we'll make any modifications
4607 * we want to the MTU/MSS Table and also initialize the congestion
4608 * parameters.
636f9d37 4609 */
b8ff05a9 4610 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
92e7ae71
HS
4611 if (state != DEV_STATE_INIT) {
4612 int i;
4613
4614 /* The default MTU Table contains values 1492 and 1500.
4615 * However, for TCP, it's better to have two values which are
4616 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
4617 * This allows us to have a TCP Data Payload which is a
4618 * multiple of 8 regardless of what combination of TCP Options
4619 * are in use (always a multiple of 4 bytes) which is
4620 * important for performance reasons. For instance, if no
4621 * options are in use, then we have a 20-byte IP header and a
4622 * 20-byte TCP header. In this case, a 1500-byte MSS would
4623 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
4624 * which is not a multiple of 8. So using an MSS of 1488 in
4625 * this case results in a TCP Data Payload of 1448 bytes which
4626 * is a multiple of 8. On the other hand, if 12-byte TCP Time
4627 * Stamps have been negotiated, then an MTU of 1500 bytes
4628 * results in a TCP Data Payload of 1448 bytes which, as
4629 * above, is a multiple of 8 bytes ...
4630 */
4631 for (i = 0; i < NMTUS; i++)
4632 if (adap->params.mtus[i] == 1492) {
4633 adap->params.mtus[i] = 1488;
4634 break;
4635 }
7ee9ff94 4636
92e7ae71
HS
4637 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4638 adap->params.b_wnd);
4639 }
df64e4d3 4640 t4_init_sge_params(adap);
636f9d37 4641 adap->flags |= FW_OK;
5ccf9d04 4642 t4_init_tp_params(adap, true);
b8ff05a9
DM
4643 return 0;
4644
4645 /*
636f9d37
VP
4646 * Something bad happened. If a command timed out or failed with EIO
4647 * FW does not operate within its spec or something catastrophic
4648 * happened to HW/FW, stop issuing commands.
b8ff05a9 4649 */
636f9d37 4650bye:
8b4e6b3c 4651 adap_free_hma_mem(adap);
4b8e27a8
HS
4652 kfree(adap->sge.egr_map);
4653 kfree(adap->sge.ingr_map);
4654 kfree(adap->sge.starving_fl);
4655 kfree(adap->sge.txq_maperr);
5b377d11
HS
4656#ifdef CONFIG_DEBUG_FS
4657 kfree(adap->sge.blocked_fl);
4658#endif
636f9d37
VP
4659 if (ret != -ETIMEDOUT && ret != -EIO)
4660 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
4661 return ret;
4662}
4663
204dc3c0
DM
4664/* EEH callbacks */
4665
4666static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4667 pci_channel_state_t state)
4668{
4669 int i;
4670 struct adapter *adap = pci_get_drvdata(pdev);
4671
4672 if (!adap)
4673 goto out;
4674
4675 rtnl_lock();
4676 adap->flags &= ~FW_OK;
4677 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
9fe6cb58 4678 spin_lock(&adap->stats_lock);
204dc3c0
DM
4679 for_each_port(adap, i) {
4680 struct net_device *dev = adap->port[i];
025d0973
GP
4681 if (dev) {
4682 netif_device_detach(dev);
4683 netif_carrier_off(dev);
4684 }
204dc3c0 4685 }
9fe6cb58 4686 spin_unlock(&adap->stats_lock);
b37987e8 4687 disable_interrupts(adap);
204dc3c0
DM
4688 if (adap->flags & FULL_INIT_DONE)
4689 cxgb_down(adap);
4690 rtnl_unlock();
144be3d9
GS
4691 if ((adap->flags & DEV_ENABLED)) {
4692 pci_disable_device(pdev);
4693 adap->flags &= ~DEV_ENABLED;
4694 }
204dc3c0
DM
4695out: return state == pci_channel_io_perm_failure ?
4696 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4697}
4698
4699static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4700{
4701 int i, ret;
4702 struct fw_caps_config_cmd c;
4703 struct adapter *adap = pci_get_drvdata(pdev);
4704
4705 if (!adap) {
4706 pci_restore_state(pdev);
4707 pci_save_state(pdev);
4708 return PCI_ERS_RESULT_RECOVERED;
4709 }
4710
144be3d9
GS
4711 if (!(adap->flags & DEV_ENABLED)) {
4712 if (pci_enable_device(pdev)) {
4713 dev_err(&pdev->dev, "Cannot reenable PCI "
4714 "device after reset\n");
4715 return PCI_ERS_RESULT_DISCONNECT;
4716 }
4717 adap->flags |= DEV_ENABLED;
204dc3c0
DM
4718 }
4719
4720 pci_set_master(pdev);
4721 pci_restore_state(pdev);
4722 pci_save_state(pdev);
4723 pci_cleanup_aer_uncorrect_error_status(pdev);
4724
8203b509 4725 if (t4_wait_dev_ready(adap->regs) < 0)
204dc3c0 4726 return PCI_ERS_RESULT_DISCONNECT;
b2612722 4727 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
204dc3c0
DM
4728 return PCI_ERS_RESULT_DISCONNECT;
4729 adap->flags |= FW_OK;
4730 if (adap_init1(adap, &c))
4731 return PCI_ERS_RESULT_DISCONNECT;
4732
4733 for_each_port(adap, i) {
4734 struct port_info *p = adap2pinfo(adap, i);
4735
b2612722 4736 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
060e0c75 4737 NULL, NULL);
204dc3c0
DM
4738 if (ret < 0)
4739 return PCI_ERS_RESULT_DISCONNECT;
4740 p->viid = ret;
4741 p->xact_addr_filt = -1;
4742 }
4743
4744 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4745 adap->params.b_wnd);
1ae970e0 4746 setup_memwin(adap);
204dc3c0
DM
4747 if (cxgb_up(adap))
4748 return PCI_ERS_RESULT_DISCONNECT;
4749 return PCI_ERS_RESULT_RECOVERED;
4750}
4751
4752static void eeh_resume(struct pci_dev *pdev)
4753{
4754 int i;
4755 struct adapter *adap = pci_get_drvdata(pdev);
4756
4757 if (!adap)
4758 return;
4759
4760 rtnl_lock();
4761 for_each_port(adap, i) {
4762 struct net_device *dev = adap->port[i];
025d0973
GP
4763 if (dev) {
4764 if (netif_running(dev)) {
4765 link_start(dev);
4766 cxgb_set_rxmode(dev);
4767 }
4768 netif_device_attach(dev);
204dc3c0 4769 }
204dc3c0
DM
4770 }
4771 rtnl_unlock();
4772}
4773
3646f0e5 4774static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
4775 .error_detected = eeh_err_detected,
4776 .slot_reset = eeh_slot_reset,
4777 .resume = eeh_resume,
4778};
4779
9b86a8d1
HS
4780/* Return true if the Link Configuration supports "High Speeds" (those greater
4781 * than 1Gb/s).
4782 */
57d8b764 4783static inline bool is_x_10g_port(const struct link_config *lc)
b8ff05a9 4784{
9b86a8d1
HS
4785 unsigned int speeds, high_speeds;
4786
c3168cab
GG
4787 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
4788 high_speeds = speeds &
4789 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
9b86a8d1
HS
4790
4791 return high_speeds != 0;
b8ff05a9
DM
4792}
4793
b8ff05a9
DM
4794/*
4795 * Perform default configuration of DMA queues depending on the number and type
4796 * of ports we found and the number of available CPUs. Most settings can be
4797 * modified by the admin prior to actual use.
4798 */
91744948 4799static void cfg_queues(struct adapter *adap)
b8ff05a9
DM
4800{
4801 struct sge *s = &adap->sge;
ab677ff4 4802 int i = 0, n10g = 0, qidx = 0;
688848b1
AB
4803#ifndef CONFIG_CHELSIO_T4_DCB
4804 int q10g = 0;
4805#endif
b8ff05a9 4806
94cdb8bb
HS
4807 /* Reduce memory usage in kdump environment, disable all offload.
4808 */
85eacf3f 4809 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
0fbc81b3 4810 adap->params.offload = 0;
94cdb8bb
HS
4811 adap->params.crypto = 0;
4812 }
4813
ab677ff4 4814 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
688848b1
AB
4815#ifdef CONFIG_CHELSIO_T4_DCB
4816 /* For Data Center Bridging support we need to be able to support up
4817 * to 8 Traffic Priorities; each of which will be assigned to its
4818 * own TX Queue in order to prevent Head-Of-Line Blocking.
4819 */
4820 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4821 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4822 MAX_ETH_QSETS, adap->params.nports * 8);
4823 BUG_ON(1);
4824 }
b8ff05a9 4825
688848b1
AB
4826 for_each_port(adap, i) {
4827 struct port_info *pi = adap2pinfo(adap, i);
4828
4829 pi->first_qset = qidx;
85eacf3f 4830 pi->nqsets = is_kdump_kernel() ? 1 : 8;
688848b1
AB
4831 qidx += pi->nqsets;
4832 }
4833#else /* !CONFIG_CHELSIO_T4_DCB */
b8ff05a9
DM
4834 /*
4835 * We default to 1 queue per non-10G port and up to # of cores queues
4836 * per 10G port.
4837 */
4838 if (n10g)
4839 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5952dde7
YM
4840 if (q10g > netif_get_num_default_rss_queues())
4841 q10g = netif_get_num_default_rss_queues();
b8ff05a9 4842
85eacf3f
GG
4843 if (is_kdump_kernel())
4844 q10g = 1;
4845
b8ff05a9
DM
4846 for_each_port(adap, i) {
4847 struct port_info *pi = adap2pinfo(adap, i);
4848
4849 pi->first_qset = qidx;
57d8b764 4850 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
b8ff05a9
DM
4851 qidx += pi->nqsets;
4852 }
688848b1 4853#endif /* !CONFIG_CHELSIO_T4_DCB */
b8ff05a9
DM
4854
4855 s->ethqsets = qidx;
4856 s->max_ethqsets = qidx; /* MSI-X may lower it later */
4857
0fbc81b3 4858 if (is_uld(adap)) {
b8ff05a9
DM
4859 /*
4860 * For offload we use 1 queue/channel if all ports are up to 1G,
4861 * otherwise we divide all available queues amongst the channels
4862 * capped by the number of available cores.
4863 */
4864 if (n10g) {
a56177e1 4865 i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
0fbc81b3
HS
4866 s->ofldqsets = roundup(i, adap->params.nports);
4867 } else {
4868 s->ofldqsets = adap->params.nports;
4869 }
b8ff05a9
DM
4870 }
4871
4872 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4873 struct sge_eth_rxq *r = &s->ethrxq[i];
4874
c887ad0e 4875 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
b8ff05a9
DM
4876 r->fl.size = 72;
4877 }
4878
4879 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4880 s->ethtxq[i].q.size = 1024;
4881
4882 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4883 s->ctrlq[i].q.size = 512;
4884
a4569504
AG
4885 if (!is_t4(adap->params.chip))
4886 s->ptptxq.q.size = 8;
4887
c887ad0e 4888 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
0fbc81b3 4889 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
b8ff05a9
DM
4890}
4891
4892/*
4893 * Reduce the number of Ethernet queues across all ports to at most n.
4894 * n provides at least one queue per port.
4895 */
91744948 4896static void reduce_ethqs(struct adapter *adap, int n)
b8ff05a9
DM
4897{
4898 int i;
4899 struct port_info *pi;
4900
4901 while (n < adap->sge.ethqsets)
4902 for_each_port(adap, i) {
4903 pi = adap2pinfo(adap, i);
4904 if (pi->nqsets > 1) {
4905 pi->nqsets--;
4906 adap->sge.ethqsets--;
4907 if (adap->sge.ethqsets <= n)
4908 break;
4909 }
4910 }
4911
4912 n = 0;
4913 for_each_port(adap, i) {
4914 pi = adap2pinfo(adap, i);
4915 pi->first_qset = n;
4916 n += pi->nqsets;
4917 }
4918}
4919
94cdb8bb
HS
4920static int get_msix_info(struct adapter *adap)
4921{
4922 struct uld_msix_info *msix_info;
0fbc81b3
HS
4923 unsigned int max_ingq = 0;
4924
4925 if (is_offload(adap))
4926 max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
4927 if (is_pci_uld(adap))
4928 max_ingq += MAX_OFLD_QSETS * adap->num_uld;
4929
4930 if (!max_ingq)
4931 goto out;
94cdb8bb
HS
4932
4933 msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
4934 if (!msix_info)
4935 return -ENOMEM;
4936
4937 adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
4938 sizeof(long), GFP_KERNEL);
4939 if (!adap->msix_bmap_ulds.msix_bmap) {
4940 kfree(msix_info);
4941 return -ENOMEM;
4942 }
4943 spin_lock_init(&adap->msix_bmap_ulds.lock);
4944 adap->msix_info_ulds = msix_info;
0fbc81b3 4945out:
94cdb8bb
HS
4946 return 0;
4947}
4948
4949static void free_msix_info(struct adapter *adap)
4950{
0fbc81b3 4951 if (!(adap->num_uld && adap->num_ofld_uld))
94cdb8bb
HS
4952 return;
4953
4954 kfree(adap->msix_info_ulds);
4955 kfree(adap->msix_bmap_ulds.msix_bmap);
4956}
4957
b8ff05a9
DM
4958/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4959#define EXTRA_VECS 2
4960
91744948 4961static int enable_msix(struct adapter *adap)
b8ff05a9 4962{
94cdb8bb
HS
4963 int ofld_need = 0, uld_need = 0;
4964 int i, j, want, need, allocated;
b8ff05a9
DM
4965 struct sge *s = &adap->sge;
4966 unsigned int nchan = adap->params.nports;
f36e58e5 4967 struct msix_entry *entries;
94cdb8bb 4968 int max_ingq = MAX_INGQ;
f36e58e5 4969
0fbc81b3
HS
4970 if (is_pci_uld(adap))
4971 max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
4972 if (is_offload(adap))
4973 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
94cdb8bb 4974 entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
f36e58e5
HS
4975 GFP_KERNEL);
4976 if (!entries)
4977 return -ENOMEM;
b8ff05a9 4978
94cdb8bb 4979 /* map for msix */
0fbc81b3
HS
4980 if (get_msix_info(adap)) {
4981 adap->params.offload = 0;
94cdb8bb 4982 adap->params.crypto = 0;
0fbc81b3 4983 }
94cdb8bb
HS
4984
4985 for (i = 0; i < max_ingq + 1; ++i)
b8ff05a9
DM
4986 entries[i].entry = i;
4987
4988 want = s->max_ethqsets + EXTRA_VECS;
4989 if (is_offload(adap)) {
0fbc81b3
HS
4990 want += adap->num_ofld_uld * s->ofldqsets;
4991 ofld_need = adap->num_ofld_uld * nchan;
b8ff05a9 4992 }
94cdb8bb 4993 if (is_pci_uld(adap)) {
0fbc81b3
HS
4994 want += adap->num_uld * s->ofldqsets;
4995 uld_need = adap->num_uld * nchan;
94cdb8bb 4996 }
688848b1
AB
4997#ifdef CONFIG_CHELSIO_T4_DCB
4998 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
4999 * each port.
5000 */
94cdb8bb 5001 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
688848b1 5002#else
94cdb8bb 5003 need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
688848b1 5004#endif
f36e58e5
HS
5005 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5006 if (allocated < 0) {
5007 dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
5008 " not using MSI-X\n");
5009 kfree(entries);
5010 return allocated;
5011 }
b8ff05a9 5012
f36e58e5 5013 /* Distribute available vectors to the various queue groups.
c32ad224
AG
5014 * Every group gets its minimum requirement and NIC gets top
5015 * priority for leftovers.
5016 */
94cdb8bb 5017 i = allocated - EXTRA_VECS - ofld_need - uld_need;
c32ad224
AG
5018 if (i < s->max_ethqsets) {
5019 s->max_ethqsets = i;
5020 if (i < s->ethqsets)
5021 reduce_ethqs(adap, i);
5022 }
0fbc81b3 5023 if (is_uld(adap)) {
94cdb8bb
HS
5024 if (allocated < want)
5025 s->nqs_per_uld = nchan;
5026 else
0fbc81b3 5027 s->nqs_per_uld = s->ofldqsets;
94cdb8bb
HS
5028 }
5029
0fbc81b3 5030 for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
c32ad224 5031 adap->msix_info[i].vec = entries[i].vector;
0fbc81b3
HS
5032 if (is_uld(adap)) {
5033 for (j = 0 ; i < allocated; ++i, j++) {
94cdb8bb 5034 adap->msix_info_ulds[j].vec = entries[i].vector;
0fbc81b3
HS
5035 adap->msix_info_ulds[j].idx = i;
5036 }
94cdb8bb
HS
5037 adap->msix_bmap_ulds.mapsize = j;
5038 }
43eb4e82 5039 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
0fbc81b3
HS
5040 "nic %d per uld %d\n",
5041 allocated, s->max_ethqsets, s->nqs_per_uld);
c32ad224 5042
f36e58e5 5043 kfree(entries);
c32ad224 5044 return 0;
b8ff05a9
DM
5045}
5046
5047#undef EXTRA_VECS
5048
91744948 5049static int init_rss(struct adapter *adap)
671b0060 5050{
c035e183
HS
5051 unsigned int i;
5052 int err;
5053
5054 err = t4_init_rss_mode(adap, adap->mbox);
5055 if (err)
5056 return err;
671b0060
DM
5057
5058 for_each_port(adap, i) {
5059 struct port_info *pi = adap2pinfo(adap, i);
5060
5061 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5062 if (!pi->rss)
5063 return -ENOMEM;
671b0060
DM
5064 }
5065 return 0;
5066}
5067
547fd272
HS
5068static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
5069 enum pci_bus_speed *speed,
5070 enum pcie_link_width *width)
5071{
5072 u32 lnkcap1, lnkcap2;
5073 int err1, err2;
5074
5075#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
5076
5077 *speed = PCI_SPEED_UNKNOWN;
5078 *width = PCIE_LNK_WIDTH_UNKNOWN;
5079
5080 err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
5081 &lnkcap1);
5082 err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
5083 &lnkcap2);
5084 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
5085 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5086 *speed = PCIE_SPEED_8_0GT;
5087 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5088 *speed = PCIE_SPEED_5_0GT;
5089 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5090 *speed = PCIE_SPEED_2_5GT;
5091 }
5092 if (!err1) {
5093 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
5094 if (!lnkcap2) { /* pre-r3.0 */
5095 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
5096 *speed = PCIE_SPEED_5_0GT;
5097 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
5098 *speed = PCIE_SPEED_2_5GT;
5099 }
5100 }
5101
5102 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5103 return err1 ? err1 : err2 ? err2 : -EINVAL;
5104 return 0;
5105}
5106
5107static void cxgb4_check_pcie_caps(struct adapter *adap)
5108{
5109 enum pcie_link_width width, width_cap;
5110 enum pci_bus_speed speed, speed_cap;
5111
5112#define PCIE_SPEED_STR(speed) \
5113 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
5114 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
5115 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
5116 "Unknown")
5117
5118 if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
5119 dev_warn(adap->pdev_dev,
5120 "Unable to determine PCIe device BW capabilities\n");
5121 return;
5122 }
5123
5124 if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
5125 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
5126 dev_warn(adap->pdev_dev,
5127 "Unable to determine PCI Express bandwidth.\n");
5128 return;
5129 }
5130
5131 dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
5132 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
5133 dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
5134 width, width_cap);
5135 if (speed < speed_cap || width < width_cap)
5136 dev_info(adap->pdev_dev,
5137 "A slot with more lanes and/or higher speed is "
5138 "suggested for optimal performance.\n");
5139}
5140
0de72738
HS
5141/* Dump basic information about the adapter */
5142static void print_adapter_info(struct adapter *adapter)
5143{
760446f9
GG
5144 /* Hardware/Firmware/etc. Version/Revision IDs */
5145 t4_dump_version_info(adapter);
0de72738
HS
5146
5147 /* Software/Hardware configuration */
5148 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
5149 is_offload(adapter) ? "R" : "",
5150 ((adapter->flags & USING_MSIX) ? "MSI-X" :
5151 (adapter->flags & USING_MSI) ? "MSI" : ""),
5152 is_offload(adapter) ? "Offload" : "non-Offload");
5153}
5154
91744948 5155static void print_port_info(const struct net_device *dev)
b8ff05a9 5156{
b8ff05a9 5157 char buf[80];
118969ed 5158 char *bufp = buf;
f1a051b9 5159 const char *spd = "";
118969ed
DM
5160 const struct port_info *pi = netdev_priv(dev);
5161 const struct adapter *adap = pi->adapter;
f1a051b9
DM
5162
5163 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5164 spd = " 2.5 GT/s";
5165 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5166 spd = " 5 GT/s";
d2e752db
RD
5167 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5168 spd = " 8 GT/s";
b8ff05a9 5169
c3168cab 5170 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
5e78f7fd 5171 bufp += sprintf(bufp, "100M/");
c3168cab 5172 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
5e78f7fd 5173 bufp += sprintf(bufp, "1G/");
c3168cab 5174 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
118969ed 5175 bufp += sprintf(bufp, "10G/");
c3168cab 5176 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
9b86a8d1 5177 bufp += sprintf(bufp, "25G/");
c3168cab 5178 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
72aca4bf 5179 bufp += sprintf(bufp, "40G/");
c3168cab
GG
5180 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
5181 bufp += sprintf(bufp, "50G/");
5182 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
9b86a8d1 5183 bufp += sprintf(bufp, "100G/");
c3168cab
GG
5184 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
5185 bufp += sprintf(bufp, "200G/");
5186 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
5187 bufp += sprintf(bufp, "400G/");
118969ed
DM
5188 if (bufp != buf)
5189 --bufp;
72aca4bf 5190 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
118969ed 5191
0de72738
HS
5192 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
5193 dev->name, adap->params.vpd.id, adap->name, buf);
b8ff05a9
DM
5194}
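
/* Illustrative note: for a port whose pcaps advertise, say, 1G, 10G and 25G,
 * the line printed above takes the form
 *   "<ifname>: Chelsio <VPD id> (<PCI name>) 1G/10G/25GBASE-<port type>";
 * the trailing '/' of the speed list is deliberately overwritten by the
 * "BASE-..." suffix written at bufp.
 */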
5195
06546391
DM
5196/*
5197 * Free the following resources:
5198 * - memory used for tables
5199 * - MSI/MSI-X
5200 * - net devices
5201 * - resources FW is holding for us
5202 */
5203static void free_some_resources(struct adapter *adapter)
5204{
5205 unsigned int i;
5206
0e249898 5207 kvfree(adapter->mps_encap);
3bdb376e 5208 kvfree(adapter->smt);
752ade68 5209 kvfree(adapter->l2t);
c68644ef 5210 kvfree(adapter->srq);
b72a32da 5211 t4_cleanup_sched(adapter);
752ade68 5212 kvfree(adapter->tids.tid_tab);
e0f911c8 5213 cxgb4_cleanup_tc_flower(adapter);
d8931847 5214 cxgb4_cleanup_tc_u32(adapter);
4b8e27a8
HS
5215 kfree(adapter->sge.egr_map);
5216 kfree(adapter->sge.ingr_map);
5217 kfree(adapter->sge.starving_fl);
5218 kfree(adapter->sge.txq_maperr);
5b377d11
HS
5219#ifdef CONFIG_DEBUG_FS
5220 kfree(adapter->sge.blocked_fl);
5221#endif
06546391
DM
5222 disable_msi(adapter);
5223
5224 for_each_port(adapter, i)
671b0060 5225 if (adapter->port[i]) {
4f3a0fcf
HS
5226 struct port_info *pi = adap2pinfo(adapter, i);
5227
5228 if (pi->viid != 0)
5229 t4_free_vi(adapter, adapter->mbox, adapter->pf,
5230 0, pi->viid);
671b0060 5231 kfree(adap2pinfo(adapter, i)->rss);
06546391 5232 free_netdev(adapter->port[i]);
671b0060 5233 }
06546391 5234 if (adapter->flags & FW_OK)
b2612722 5235 t4_fw_bye(adapter, adapter->pf);
06546391
DM
5236}
5237
2ed28baa 5238#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 5239#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9 5240 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
22adfe0a 5241#define SEGMENT_SIZE 128
b8ff05a9 5242
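/* CHELSIO_CHIP_CODE() packs the chip version into the high nibble and the PL
 * revision into the low nibble, so the value returned below compares correctly
 * against other chip codes but not against the bare CHELSIO_T4/T5/T6 version
 * constants; use CHELSIO_CHIP_VERSION() to extract the version first.
 */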
e8d45292 5243static int t4_get_chip_type(struct adapter *adap, int ver)
d86bd29e 5244{
e8d45292 5245 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
46cdc9be 5246
e8d45292 5247 switch (ver) {
d86bd29e 5248 case CHELSIO_T4:
46cdc9be 5249 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
d86bd29e 5250 case CHELSIO_T5:
46cdc9be 5251 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
d86bd29e 5252 case CHELSIO_T6:
46cdc9be 5253 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
d86bd29e 5254 default:
e8d45292 5255 break;
d86bd29e 5256 }
46cdc9be 5257 return -EINVAL;
d86bd29e
HS
5258}
5259
b6244201 5260#ifdef CONFIG_PCI_IOV
baf50868 5261static void cxgb4_mgmt_setup(struct net_device *dev)
e7b48a32
HS
5262{
5263 dev->type = ARPHRD_NONE;
5264 dev->mtu = 0;
5265 dev->hard_header_len = 0;
5266 dev->addr_len = 0;
5267 dev->tx_queue_len = 0;
5268 dev->flags |= IFF_NOARP;
5269 dev->priv_flags |= IFF_NO_QUEUE;
5270
5271 /* Initialize the device structure. */
5272 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
5273 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
e7b48a32
HS
5274}
5275
b6244201
HS
5276static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
5277{
7829451c 5278 struct adapter *adap = pci_get_drvdata(pdev);
b6244201
HS
5279 int err = 0;
5280 int current_vfs = pci_num_vf(pdev);
5281 u32 pcie_fw;
b6244201 5282
7829451c 5283 pcie_fw = readl(adap->regs + PCIE_FW_A);
7cfac881
AV
5284 /* Check if fw is initialized */
5285 if (!(pcie_fw & PCIE_FW_INIT_F)) {
5286 dev_warn(&pdev->dev, "Device not initialized\n");
b6244201
HS
5287 return -EOPNOTSUPP;
5288 }
5289
5290 /* If any of the VF's is already assigned to Guest OS, then
5291 * SRIOV for the same cannot be modified
5292 */
5293 if (current_vfs && pci_vfs_assigned(pdev)) {
5294 dev_err(&pdev->dev,
5295 "Cannot modify SR-IOV while VFs are assigned\n");
baf50868 5296 return current_vfs;
b6244201 5297 }
baf50868
GG
5298 /* Note that the upper-level code ensures that we're never called with
5299 * a non-zero "num_vfs" when we already have VFs instantiated. But
5300 * it never hurts to code defensively.
b6244201 5301 */
baf50868
GG
5302 if (num_vfs != 0 && current_vfs != 0)
5303 return -EBUSY;
5304
5305 /* Nothing to do for no change. */
5306 if (num_vfs == current_vfs)
5307 return num_vfs;
5308
5309 /* Disable SRIOV when zero is passed. */
b6244201
HS
5310 if (!num_vfs) {
5311 pci_disable_sriov(pdev);
baf50868
GG
5312 /* free VF Management Interface */
5313 unregister_netdev(adap->port[0]);
5314 free_netdev(adap->port[0]);
5315 adap->port[0] = NULL;
5316
661dbeb9 5317 /* free VF resources */
baf50868 5318 adap->num_vfs = 0;
661dbeb9
HS
5319 kfree(adap->vfinfo);
5320 adap->vfinfo = NULL;
baf50868 5321 return 0;
b6244201
HS
5322 }
5323
baf50868
GG
5324 if (!current_vfs) {
5325 struct fw_pfvf_cmd port_cmd, port_rpl;
5326 struct net_device *netdev;
5327 unsigned int pmask, port;
5328 struct pci_dev *pbridge;
5329 struct port_info *pi;
5330 char name[IFNAMSIZ];
5331 u32 devcap2;
5332 u16 flags;
5333 int pos;
5334
5335 /* If we want to instantiate Virtual Functions, then our
5336 * parent bridge's PCI-E needs to support Alternative Routing
5337 * ID (ARI) because our VFs will show up at function offset 8
5338 * and above.
5339 */
5340 pbridge = pdev->bus->self;
5341 pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
5342 pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
5343 pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
5344
5345 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
5346 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
5347 /* Our parent bridge does not support ARI so issue a
5348 * warning and skip instantiating the VFs. They
5349 * won't be reachable.
5350 */
5351 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
5352 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
5353 PCI_FUNC(pbridge->devfn));
5354 return -ENOTSUPP;
5355 }
5356 memset(&port_cmd, 0, sizeof(port_cmd));
5357 port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
5358 FW_CMD_REQUEST_F |
5359 FW_CMD_READ_F |
5360 FW_PFVF_CMD_PFN_V(adap->pf) |
5361 FW_PFVF_CMD_VFN_V(0));
5362 port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
5363 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
5364 &port_rpl);
b6244201
HS
5365 if (err)
5366 return err;
baf50868
GG
5367 pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
5368 port = ffs(pmask) - 1;
5369 /* Allocate VF Management Interface. */
5370 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
5371 adap->pf);
5372 netdev = alloc_netdev(sizeof(struct port_info),
5373 name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
5374 if (!netdev)
5375 return -ENOMEM;
7829451c 5376
baf50868
GG
5377 pi = netdev_priv(netdev);
5378 pi->adapter = adap;
5379 pi->lport = port;
5380 pi->tx_chan = port;
5381 SET_NETDEV_DEV(netdev, &pdev->dev);
5382
5383 adap->port[0] = netdev;
5384 pi->port_id = 0;
5385
5386 err = register_netdev(adap->port[0]);
5387 if (err) {
5388 pr_info("Unable to register VF mgmt netdev %s\n", name);
5389 free_netdev(adap->port[0]);
5390 adap->port[0] = NULL;
e7b48a32 5391 return err;
baf50868
GG
5392 }
5393 /* Allocate and set up VF Information. */
5394 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
5395 sizeof(struct vf_info), GFP_KERNEL);
5396 if (!adap->vfinfo) {
5397 unregister_netdev(adap->port[0]);
5398 free_netdev(adap->port[0]);
5399 adap->port[0] = NULL;
5400 return -ENOMEM;
5401 }
5402 cxgb4_mgmt_fill_vf_station_mac_addr(adap);
5403 }
5404 /* Instantiate the requested number of VFs. */
5405 err = pci_enable_sriov(pdev, num_vfs);
5406 if (err) {
5407 pr_info("Unable to instantiate %d VFs\n", num_vfs);
5408 if (!current_vfs) {
5409 unregister_netdev(adap->port[0]);
5410 free_netdev(adap->port[0]);
5411 adap->port[0] = NULL;
5412 kfree(adap->vfinfo);
5413 adap->vfinfo = NULL;
5414 }
5415 return err;
b6244201 5416 }
661dbeb9 5417
baf50868 5418 adap->num_vfs = num_vfs;
b6244201
HS
5419 return num_vfs;
5420}
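
/* Illustrative sketch, not part of the driver: the inline ARI probe above
 * could be factored into a predicate like this one. The function name is
 * made up; the config-space accesses mirror those used above, with an added
 * guard for a bridge that has no PCIe capability at all.
 */
static bool example_bridge_supports_ari(struct pci_dev *bridge)
{
	u32 devcap2 = 0;
	u16 flags = 0;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (!pos)
		return false;

	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &devcap2);

	/* DEVCAP2 (and thus ARI) exists only from capability version 2 on */
	return (flags & PCI_EXP_FLAGS_VERS) >= 2 &&
	       (devcap2 & PCI_EXP_DEVCAP2_ARI);
}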
baf50868 5421#endif /* CONFIG_PCI_IOV */
b6244201 5422
1dd06ae8 5423static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
b8ff05a9 5424{
e8d45292
GG
5425 struct net_device *netdev;
5426 struct adapter *adapter;
5427 static int adap_idx = 1;
5428 int s_qpp, qpp, num_seg;
b8ff05a9 5429 struct port_info *pi;
c8f44aff 5430 bool highdma = false;
d86bd29e 5431 enum chip_type chip;
e8d45292
GG
5432 void __iomem *regs;
5433 int func, chip_ver;
5434 u16 device_id;
5435 int i, err;
5436 u32 whoami;
b8ff05a9
DM
5437
5438 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5439
5440 err = pci_request_regions(pdev, KBUILD_MODNAME);
5441 if (err) {
5442 /* Just info, some other driver may have claimed the device. */
5443 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5444 return err;
5445 }
5446
b8ff05a9
DM
5447 err = pci_enable_device(pdev);
5448 if (err) {
5449 dev_err(&pdev->dev, "cannot enable PCI device\n");
5450 goto out_release_regions;
5451 }
5452
d6ce2628
HS
5453 regs = pci_ioremap_bar(pdev, 0);
5454 if (!regs) {
5455 dev_err(&pdev->dev, "cannot map device registers\n");
5456 err = -ENOMEM;
5457 goto out_disable_device;
5458 }
5459
baf50868
GG
5460 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5461 if (!adapter) {
5462 err = -ENOMEM;
5463 goto out_unmap_bar0;
5464 }
5465
5466 adapter->regs = regs;
8203b509
HS
5467 err = t4_wait_dev_ready(regs);
5468 if (err < 0)
e729452e 5469 goto out_free_adapter;
8203b509 5470
d6ce2628 5471 /* We control everything through one PF */
e8d45292
GG
5472 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5473 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
5474 chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
5475 if (chip < 0) {
5476 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
5477 err = chip;
5478 goto out_free_adapter;
5479 }
5480 chip_ver = CHELSIO_CHIP_VERSION(chip);
5481 func = chip_ver <= CHELSIO_T5 ?
5482 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
baf50868
GG
5483
5484 adapter->pdev = pdev;
5485 adapter->pdev_dev = &pdev->dev;
5486 adapter->name = pci_name(pdev);
5487 adapter->mbox = func;
5488 adapter->pf = func;
016764de
GG
5489 adapter->params.chip = chip;
5490 adapter->adap_idx = adap_idx;
baf50868
GG
5491 adapter->msg_enable = DFLT_MSG_ENABLE;
5492 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
5493 (sizeof(struct mbox_cmd) *
5494 T4_OS_LOG_MBOX_CMDS),
5495 GFP_KERNEL);
5496 if (!adapter->mbox_log) {
5497 err = -ENOMEM;
5498 goto out_free_adapter;
5499 }
5500 spin_lock_init(&adapter->mbox_lock);
5501 INIT_LIST_HEAD(&adapter->mlist.list);
aca06eaf 5502 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
baf50868
GG
5503 pci_set_drvdata(pdev, adapter);
5504
d6ce2628 5505 if (func != ent->driver_data) {
d6ce2628
HS
5506 pci_disable_device(pdev);
5507 pci_save_state(pdev); /* to restore SR-IOV later */
baf50868 5508 return 0;
d6ce2628
HS
5509 }
5510
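	/* Standard PCI DMA setup: prefer a 64-bit DMA mask (plus a matching
	 * coherent mask) and fall back to a 32-bit mask if the platform
	 * cannot satisfy it. Only in the 64-bit case is NETIF_F_HIGHDMA
	 * later advertised on the net devices we create.
	 */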
b8ff05a9 5511 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 5512 highdma = true;
b8ff05a9
DM
5513 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5514 if (err) {
5515 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5516 "coherent allocations\n");
baf50868 5517 goto out_free_adapter;
b8ff05a9
DM
5518 }
5519 } else {
5520 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5521 if (err) {
5522 dev_err(&pdev->dev, "no usable DMA configuration\n");
baf50868 5523 goto out_free_adapter;
b8ff05a9
DM
5524 }
5525 }
5526
5527 pci_enable_pcie_error_reporting(pdev);
5528 pci_set_master(pdev);
5529 pci_save_state(pdev);
7829451c 5530 adap_idx++;
29aaee65
AB
5531 adapter->workq = create_singlethread_workqueue("cxgb4");
5532 if (!adapter->workq) {
5533 err = -ENOMEM;
5534 goto out_free_adapter;
5535 }
5536
144be3d9
GS
5537 /* PCI device has been enabled */
5538 adapter->flags |= DEV_ENABLED;
b8ff05a9
DM
5539 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5540
b0ba9d5f
CL
5541 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
5542 * Ingress Packet Data to Free List Buffers in order to allow for
5543 * chipset performance optimizations between the Root Complex and
5544 * Memory Controllers. (Messages to the associated Ingress Queue
 5545 * notifying new Packet Placement in the Free List Buffers will be
 5546 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
 5547 * all preceding PCIe Transaction Layer Packets will be processed
 5548 * first.) But some Root Complexes have various issues with Upstream
 5549 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
 5550 * PCIe devices under such Root Complexes get the Relaxed Ordering bit
 5551 * cleared in their configuration space, so we check our own PCIe
 5552 * configuration space to see if it's flagged with advice against
5553 * using Relaxed Ordering.
5554 */
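	/* pcie_relaxed_ordering_enabled() reports whether the Enable Relaxed
	 * Ordering bit is still set in our Device Control register; platform
	 * quirks clear it for devices under Root Complexes known to misbehave.
	 */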
5555 if (!pcie_relaxed_ordering_enabled(pdev))
5556 adapter->flags |= ROOT_NO_RELAXED_ORDERING;
5557
b8ff05a9
DM
5558 spin_lock_init(&adapter->stats_lock);
5559 spin_lock_init(&adapter->tid_release_lock);
e327c225 5560 spin_lock_init(&adapter->win0_lock);
b8ff05a9
DM
5561
5562 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
5563 INIT_WORK(&adapter->db_full_task, process_db_full);
5564 INIT_WORK(&adapter->db_drop_task, process_db_drop);
8b7372c1 5565 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
b8ff05a9
DM
5566
5567 err = t4_prep_adapter(adapter);
5568 if (err)
d6ce2628
HS
5569 goto out_free_adapter;
5570
1dde532d
RL
5571 if (is_kdump_kernel()) {
5572 /* Collect hardware state and append to /proc/vmcore */
5573 err = cxgb4_cudbg_vmcore_add_dump(adapter);
5574 if (err) {
5575 dev_warn(adapter->pdev_dev,
5576 "Fail collecting vmcore device dump, err: %d. Continuing\n",
5577 err);
5578 err = 0;
5579 }
5580 }
22adfe0a 5581
d14807dd 5582 if (!is_t4(adapter->params.chip)) {
f612b815
HS
5583 s_qpp = (QUEUESPERPAGEPF0_S +
5584 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
b2612722 5585 adapter->pf);
f612b815
HS
5586 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
5587 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
22adfe0a
SR
5588 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5589
 5590 /* Each segment is 128B in size. Write coalescing is enabled only
 5591 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
 5592 * queue is less than the number of segments that can be
 5593 * accommodated in a page.
5594 */
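		/* For example, with 4 KiB pages num_seg = 4096 / 128 = 32,
		 * so any queues-per-page value above 32 is rejected here.
		 */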
5595 if (qpp > num_seg) {
5596 dev_err(&pdev->dev,
5597 "Incorrect number of egress queues per page\n");
5598 err = -EINVAL;
d6ce2628 5599 goto out_free_adapter;
22adfe0a
SR
5600 }
5601 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5602 pci_resource_len(pdev, 2));
5603 if (!adapter->bar2) {
5604 dev_err(&pdev->dev, "cannot map device bar2 region\n");
5605 err = -ENOMEM;
d6ce2628 5606 goto out_free_adapter;
22adfe0a
SR
5607 }
5608 }
5609
636f9d37 5610 setup_memwin(adapter);
b8ff05a9 5611 err = adap_init0(adapter);
5b377d11
HS
5612#ifdef CONFIG_DEBUG_FS
5613 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
5614#endif
636f9d37 5615 setup_memwin_rdma(adapter);
b8ff05a9
DM
5616 if (err)
5617 goto out_unmap_bar;
5618
2a485cf7
HS
5619 /* configure SGE_STAT_CFG_A to read WC stats */
5620 if (!is_t4(adapter->params.chip))
676d6a75
HS
5621 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
5622 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
5623 T6_STATMODE_V(0)));
2a485cf7 5624
b8ff05a9 5625 for_each_port(adapter, i) {
b8ff05a9
DM
5626 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5627 MAX_ETH_QSETS);
5628 if (!netdev) {
5629 err = -ENOMEM;
5630 goto out_free_dev;
5631 }
5632
5633 SET_NETDEV_DEV(netdev, &pdev->dev);
5634
5635 adapter->port[i] = netdev;
5636 pi = netdev_priv(netdev);
5637 pi->adapter = adapter;
5638 pi->xact_addr_filt = -1;
b8ff05a9 5639 pi->port_id = i;
b8ff05a9
DM
5640 netdev->irq = pdev->irq;
5641
2ed28baa
MM
5642 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5643 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5644 NETIF_F_RXCSUM | NETIF_F_RXHASH |
d8931847
RL
5645 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
5646 NETIF_F_HW_TC;
d0a1299c 5647
e8d45292 5648 if (chip_ver > CHELSIO_T5) {
c50ae55e
GG
5649 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
5650 NETIF_F_IPV6_CSUM |
5651 NETIF_F_RXCSUM |
5652 NETIF_F_GSO_UDP_TUNNEL |
5653 NETIF_F_TSO | NETIF_F_TSO6;
5654
d0a1299c 5655 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
c50ae55e 5656 }
d0a1299c 5657
c8f44aff
MM
5658 if (highdma)
5659 netdev->hw_features |= NETIF_F_HIGHDMA;
5660 netdev->features |= netdev->hw_features;
b8ff05a9
DM
5661 netdev->vlan_features = netdev->features & VLAN_FEAT;
5662
01789349
JP
5663 netdev->priv_flags |= IFF_UNICAST_FLT;
5664
d894be57 5665 /* MTU range: 81 - 9600 */
a047fbae 5666 netdev->min_mtu = 81; /* accommodate SACK */
d894be57
JW
5667 netdev->max_mtu = MAX_MTU;
5668
b8ff05a9 5669 netdev->netdev_ops = &cxgb4_netdev_ops;
688848b1
AB
5670#ifdef CONFIG_CHELSIO_T4_DCB
5671 netdev->dcbnl_ops = &cxgb4_dcb_ops;
5672 cxgb4_dcb_state_init(netdev);
5673#endif
812034f1 5674 cxgb4_set_ethtool_ops(netdev);
b8ff05a9
DM
5675 }
5676
ad75b7d3
RL
5677 cxgb4_init_ethtool_dump(adapter);
5678
b8ff05a9
DM
5679 pci_set_drvdata(pdev, adapter);
5680
5681 if (adapter->flags & FW_OK) {
060e0c75 5682 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
5683 if (err)
5684 goto out_free_dev;
098ef6c2
HS
5685 } else if (adapter->params.nports == 1) {
5686 /* If we don't have a connection to the firmware -- possibly
5687 * because of an error -- grab the raw VPD parameters so we
5688 * can set the proper MAC Address on the debug network
5689 * interface that we've created.
5690 */
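		/* The VPD "na" field stores the MAC address as 12 ASCII hex
		 * digits; the loop below folds each pair of digits into one
		 * byte (high nibble times 16 plus low nibble).
		 */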
5691 u8 hw_addr[ETH_ALEN];
5692 u8 *na = adapter->params.vpd.na;
5693
5694 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
5695 if (!err) {
5696 for (i = 0; i < ETH_ALEN; i++)
5697 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
5698 hex2val(na[2 * i + 1]));
5699 t4_set_hw_addr(adapter, 0, hw_addr);
5700 }
b8ff05a9
DM
5701 }
5702
098ef6c2 5703 /* Configure queues and allocate tables now; they can be needed as
b8ff05a9
DM
5704 * soon as the first register_netdev completes.
5705 */
5706 cfg_queues(adapter);
5707
3bdb376e
KS
5708 adapter->smt = t4_init_smt();
5709 if (!adapter->smt) {
5710 /* We tolerate a lack of SMT, giving up some functionality */
5711 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
5712 }
5713
5be9ed8d 5714 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
b8ff05a9
DM
5715 if (!adapter->l2t) {
5716 /* We tolerate a lack of L2T, giving up some functionality */
5717 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
5718 adapter->params.offload = 0;
5719 }
5720
0e249898
AV
5721 adapter->mps_encap = kvzalloc(sizeof(struct mps_encap_entry) *
5722 adapter->params.arch.mps_tcam_size,
5723 GFP_KERNEL);
5724 if (!adapter->mps_encap)
5725 dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
5726
b5a02f50 5727#if IS_ENABLED(CONFIG_IPV6)
e8d45292 5728 if (chip_ver <= CHELSIO_T5 &&
eb72f74f
HS
5729 (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
5730 /* CLIP functionality is not present in hardware,
5731 * hence disable all offload features
b5a02f50
AB
5732 */
5733 dev_warn(&pdev->dev,
eb72f74f 5734 "CLIP not enabled in hardware, continuing\n");
b5a02f50 5735 adapter->params.offload = 0;
eb72f74f
HS
5736 } else {
5737 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
5738 adapter->clipt_end);
5739 if (!adapter->clipt) {
5740 /* We tolerate a lack of clip_table, giving up
5741 * some functionality
5742 */
5743 dev_warn(&pdev->dev,
5744 "could not allocate Clip table, continuing\n");
5745 adapter->params.offload = 0;
5746 }
b5a02f50
AB
5747 }
5748#endif
b72a32da
RL
5749
5750 for_each_port(adapter, i) {
5751 pi = adap2pinfo(adapter, i);
5752 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
5753 if (!pi->sched_tbl)
5754 dev_warn(&pdev->dev,
5755 "could not activate scheduling on port %d\n",
5756 i);
5757 }
5758
578b46b9 5759 if (tid_init(&adapter->tids) < 0) {
b8ff05a9
DM
5760 dev_warn(&pdev->dev, "could not allocate TID table, "
5761 "continuing\n");
5762 adapter->params.offload = 0;
d8931847 5763 } else {
45da1ca2 5764 adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
d8931847
RL
5765 if (!adapter->tc_u32)
5766 dev_warn(&pdev->dev,
5767 "could not offload tc u32, continuing\n");
62488e4b 5768
79e6d46a
KS
5769 if (cxgb4_init_tc_flower(adapter))
5770 dev_warn(&pdev->dev,
5771 "could not offload tc flower, continuing\n");
b8ff05a9
DM
5772 }
5773
5c31254e 5774 if (is_offload(adapter) || is_hashfilter(adapter)) {
9a1bb9f6
HS
5775 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
5776 u32 hash_base, hash_reg;
5777
 5778 if (chip_ver <= CHELSIO_T5) {
5779 hash_reg = LE_DB_TID_HASHBASE_A;
5780 hash_base = t4_read_reg(adapter, hash_reg);
5781 adapter->tids.hash_base = hash_base / 4;
5782 } else {
5783 hash_reg = T6_LE_DB_HASH_TID_BASE_A;
5784 hash_base = t4_read_reg(adapter, hash_reg);
5785 adapter->tids.hash_base = hash_base;
5786 }
5787 }
5788 }
5789
f7cabcdd
DM
5790 /* See what interrupts we'll be using */
5791 if (msi > 1 && enable_msix(adapter) == 0)
5792 adapter->flags |= USING_MSIX;
94cdb8bb 5793 else if (msi > 0 && pci_enable_msi(pdev) == 0) {
f7cabcdd 5794 adapter->flags |= USING_MSI;
94cdb8bb
HS
5795 if (msi > 1)
5796 free_msix_info(adapter);
5797 }
f7cabcdd 5798
547fd272
HS
 5799 /* check for PCI Express bandwidth capabilities */
5800 cxgb4_check_pcie_caps(adapter);
5801
671b0060
DM
5802 err = init_rss(adapter);
5803 if (err)
5804 goto out_free_dev;
5805
843bd7db
AV
5806 err = setup_fw_sge_queues(adapter);
5807 if (err) {
5808 dev_err(adapter->pdev_dev,
5809 "FW sge queue allocation failed, err %d", err);
5810 goto out_free_dev;
5811 }
5812
b8ff05a9
DM
5813 /*
5814 * The card is now ready to go. If any errors occur during device
5815 * registration we do not fail the whole card but rather proceed only
5816 * with the ports we manage to register successfully. However we must
5817 * register at least one net device.
5818 */
5819 for_each_port(adapter, i) {
a57cabe0 5820 pi = adap2pinfo(adapter, i);
d2a007ab 5821 adapter->port[i]->dev_port = pi->lport;
a57cabe0
DM
5822 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5823 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5824
b1a73af9
SM
5825 netif_carrier_off(adapter->port[i]);
5826
b8ff05a9
DM
5827 err = register_netdev(adapter->port[i]);
5828 if (err)
b1a3c2b6 5829 break;
b1a3c2b6
DM
5830 adapter->chan_map[pi->tx_chan] = i;
5831 print_port_info(adapter->port[i]);
b8ff05a9 5832 }
b1a3c2b6 5833 if (i == 0) {
b8ff05a9
DM
5834 dev_err(&pdev->dev, "could not register any net devices\n");
5835 goto out_free_dev;
5836 }
b1a3c2b6
DM
5837 if (err) {
5838 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5839 err = 0;
6403eab1 5840 }
b8ff05a9
DM
5841
5842 if (cxgb4_debugfs_root) {
5843 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5844 cxgb4_debugfs_root);
5845 setup_debugfs(adapter);
5846 }
5847
6482aa7c
DLR
5848 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5849 pdev->needs_freset = 1;
5850
0fbc81b3
HS
5851 if (is_uld(adapter)) {
5852 mutex_lock(&uld_mutex);
5853 list_add_tail(&adapter->list_node, &adapter_list);
5854 mutex_unlock(&uld_mutex);
5855 }
b8ff05a9 5856
9c33e420
AG
5857 if (!is_t4(adapter->params.chip))
5858 cxgb4_ptp_init(adapter);
5859
0de72738 5860 print_adapter_info(adapter);
7829451c 5861 return 0;
0de72738 5862
b8ff05a9 5863 out_free_dev:
843bd7db 5864 t4_free_sge_resources(adapter);
06546391 5865 free_some_resources(adapter);
94cdb8bb
HS
5866 if (adapter->flags & USING_MSIX)
5867 free_msix_info(adapter);
0fbc81b3
HS
5868 if (adapter->num_uld || adapter->num_ofld_uld)
5869 t4_uld_mem_free(adapter);
b8ff05a9 5870 out_unmap_bar:
d14807dd 5871 if (!is_t4(adapter->params.chip))
22adfe0a 5872 iounmap(adapter->bar2);
b8ff05a9 5873 out_free_adapter:
29aaee65
AB
5874 if (adapter->workq)
5875 destroy_workqueue(adapter->workq);
5876
7f080c3f 5877 kfree(adapter->mbox_log);
b8ff05a9 5878 kfree(adapter);
d6ce2628
HS
5879 out_unmap_bar0:
5880 iounmap(regs);
b8ff05a9
DM
5881 out_disable_device:
5882 pci_disable_pcie_error_reporting(pdev);
5883 pci_disable_device(pdev);
5884 out_release_regions:
5885 pci_release_regions(pdev);
b8ff05a9
DM
5886 return err;
5887}
5888
91744948 5889static void remove_one(struct pci_dev *pdev)
b8ff05a9
DM
5890{
5891 struct adapter *adapter = pci_get_drvdata(pdev);
5892
7829451c
HS
5893 if (!adapter) {
5894 pci_release_regions(pdev);
5895 return;
5896 }
636f9d37 5897
e1f6198e
GG
5898 adapter->flags |= SHUTTING_DOWN;
5899
7829451c 5900 if (adapter->pf == 4) {
b8ff05a9
DM
5901 int i;
5902
29aaee65
AB
5903 /* Tear down per-adapter Work Queue first since it can contain
5904 * references to our adapter data structure.
5905 */
5906 destroy_workqueue(adapter->workq);
5907
6a146f3a 5908 if (is_uld(adapter)) {
b8ff05a9 5909 detach_ulds(adapter);
6a146f3a
GP
5910 t4_uld_clean_up(adapter);
5911 }
b8ff05a9 5912
8b4e6b3c
AV
5913 adap_free_hma_mem(adapter);
5914
b37987e8
HS
5915 disable_interrupts(adapter);
5916
b8ff05a9 5917 for_each_port(adapter, i)
8f3a7676 5918 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
b8ff05a9
DM
5919 unregister_netdev(adapter->port[i]);
5920
9f16dc2e 5921 debugfs_remove_recursive(adapter->debugfs_root);
b8ff05a9 5922
9c33e420
AG
5923 if (!is_t4(adapter->params.chip))
5924 cxgb4_ptp_stop(adapter);
5925
f2b7e78d
VP
5926 /* If we allocated filters, free up state associated with any
5927 * valid filters ...
5928 */
578b46b9 5929 clear_all_filters(adapter);
f2b7e78d 5930
aaefae9b
DM
5931 if (adapter->flags & FULL_INIT_DONE)
5932 cxgb_down(adapter);
b8ff05a9 5933
94cdb8bb
HS
5934 if (adapter->flags & USING_MSIX)
5935 free_msix_info(adapter);
0fbc81b3
HS
5936 if (adapter->num_uld || adapter->num_ofld_uld)
5937 t4_uld_mem_free(adapter);
06546391 5938 free_some_resources(adapter);
b5a02f50
AB
5939#if IS_ENABLED(CONFIG_IPV6)
5940 t4_cleanup_clip_tbl(adapter);
5941#endif
d14807dd 5942 if (!is_t4(adapter->params.chip))
22adfe0a 5943 iounmap(adapter->bar2);
7829451c
HS
5944 }
5945#ifdef CONFIG_PCI_IOV
5946 else {
baf50868 5947 cxgb4_iov_configure(adapter->pdev, 0);
7829451c
HS
5948 }
5949#endif
c4e43e14
GG
5950 iounmap(adapter->regs);
5951 pci_disable_pcie_error_reporting(pdev);
5952 if ((adapter->flags & DEV_ENABLED)) {
5953 pci_disable_device(pdev);
5954 adapter->flags &= ~DEV_ENABLED;
5955 }
5956 pci_release_regions(pdev);
5957 kfree(adapter->mbox_log);
5958 synchronize_rcu();
5959 kfree(adapter);
b8ff05a9
DM
5960}
5961
0fbc81b3
HS
5962/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 5963 * delivery. This is essentially a stripped-down version of the PCI remove()
 5964 * function where we do the minimal amount of work necessary to shut down any
5965 * further activity.
5966 */
5967static void shutdown_one(struct pci_dev *pdev)
5968{
5969 struct adapter *adapter = pci_get_drvdata(pdev);
5970
 5971 /* As with remove_one() above (see extended comment), we only want to
 5972 * do cleanup on PCI Devices which went all the way through init_one()
5973 * ...
5974 */
5975 if (!adapter) {
5976 pci_release_regions(pdev);
5977 return;
5978 }
5979
e1f6198e
GG
5980 adapter->flags |= SHUTTING_DOWN;
5981
0fbc81b3
HS
5982 if (adapter->pf == 4) {
5983 int i;
5984
5985 for_each_port(adapter, i)
5986 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5987 cxgb_close(adapter->port[i]);
5988
6a146f3a
GP
5989 if (is_uld(adapter)) {
5990 detach_ulds(adapter);
5991 t4_uld_clean_up(adapter);
5992 }
5993
0fbc81b3
HS
5994 disable_interrupts(adapter);
5995 disable_msi(adapter);
5996
5997 t4_sge_stop(adapter);
5998 if (adapter->flags & FW_OK)
5999 t4_fw_bye(adapter, adapter->mbox);
6000 }
0fbc81b3
HS
6001}
6002
b8ff05a9
DM
6003static struct pci_driver cxgb4_driver = {
6004 .name = KBUILD_MODNAME,
6005 .id_table = cxgb4_pci_tbl,
6006 .probe = init_one,
91744948 6007 .remove = remove_one,
0fbc81b3 6008 .shutdown = shutdown_one,
b6244201
HS
6009#ifdef CONFIG_PCI_IOV
6010 .sriov_configure = cxgb4_iov_configure,
6011#endif
204dc3c0 6012 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
6013};
6014
6015static int __init cxgb4_init_module(void)
6016{
6017 int ret;
6018
 6019 /* Debugfs support is optional; just warn if this fails */
6020 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6021 if (!cxgb4_debugfs_root)
428ac43f 6022 pr_warn("could not create debugfs entry, continuing\n");
b8ff05a9
DM
6023
6024 ret = pci_register_driver(&cxgb4_driver);
29aaee65 6025 if (ret < 0)
b8ff05a9 6026 debugfs_remove(cxgb4_debugfs_root);
01bcca68 6027
1bb60376 6028#if IS_ENABLED(CONFIG_IPV6)
b5a02f50
AB
6029 if (!inet6addr_registered) {
6030 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6031 inet6addr_registered = true;
6032 }
1bb60376 6033#endif
01bcca68 6034
b8ff05a9
DM
6035 return ret;
6036}
6037
6038static void __exit cxgb4_cleanup_module(void)
6039{
1bb60376 6040#if IS_ENABLED(CONFIG_IPV6)
1793c798 6041 if (inet6addr_registered) {
b5a02f50
AB
6042 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6043 inet6addr_registered = false;
6044 }
1bb60376 6045#endif
b8ff05a9
DM
6046 pci_unregister_driver(&cxgb4_driver);
6047 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6048}
6049
6050module_init(cxgb4_init_module);
6051module_exit(cxgb4_cleanup_module);