/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
	{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 *     msi = 2: choose from among all three options
 *     msi = 1: only consider MSI and INTx interrupts
 *     msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

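/*
 * Illustrative usage (editorial note, not part of the original source):
 * the interrupt scheme can be narrowed when the module is loaded, e.g.
 *
 *	modprobe cxgb4 msi=1	(skip MSI-X; try MSI, then INTx)
 *	modprobe cxgb4 msi=0	(force legacy INTx)
 *
 * With the 0644 permissions above the value is also visible under
 * /sys/module/cxgb4/parameters/msi; a change there is only consulted
 * for adapters initialized afterwards.
 */
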
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

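/*
 * Worked example (editorial note): an Ethernet header is 14 bytes, so with
 * the default 2-byte pad the IP header starts at buffer offset
 * 2 + 14 = 16, which is 4-byte aligned; with rx_dma_offset = 0 it would
 * start at offset 14 and fault on strict-alignment architectures.
 */
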
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel-provided function (select_queue=0) or the
 * driver's cxgb_select_queue function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 50000:
			s = "50Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* If the address was hashed (non-zero uhash/mhash), add it to our
	 * list so we can recompute the hash vector for the whole list and
	 * reprogram it.
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++; /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd &
					FW_PORT_CMD_DCBXDIS_F)
				   : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
					FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues.
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}

static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;	/* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	return err;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
						   netdev_get_tx_queue(dev, j),
						   s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
		     MPS_TRC_RSS_CONTROL_A :
		     MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	t4_free_sge_resources(adap);
	return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

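/*
 * Illustrative call (editorial sketch, values hypothetical): coalesce
 * interrupts on the first Ethernet Rx queue to roughly 100us or 32
 * packets, whichever threshold is crossed first:
 *
 *	cxgb4_set_rspq_intr_params(&adap->sge.ethrxq[0].rspq, 100, 32);
 *
 * The requested values are snapped to the nearest entries of the
 * adapter's timer_val[] and counter_val[] tables by closest_timer()
 * and closest_thres() above.
 */
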
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

1150
dca4faeb
VP
1151/* Allocate a server filter TID and set it to the supplied value.
1152 */
1153int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1154{
1155 int stid;
1156
1157 spin_lock_bh(&t->stid_lock);
1158 if (family == PF_INET) {
1159 stid = find_next_zero_bit(t->stid_bmap,
1160 t->nstids + t->nsftids, t->nstids);
1161 if (stid < (t->nstids + t->nsftids))
1162 __set_bit(stid, t->stid_bmap);
1163 else
1164 stid = -1;
1165 } else {
1166 stid = -1;
1167 }
1168 if (stid >= 0) {
1169 t->stid_tab[stid].data = data;
470c60c4
KS
1170 stid -= t->nstids;
1171 stid += t->sftid_base;
2248b293 1172 t->sftids_in_use++;
dca4faeb
VP
1173 }
1174 spin_unlock_bh(&t->stid_lock);
1175 return stid;
1176}
1177EXPORT_SYMBOL(cxgb4_alloc_sftid);
1178
1179/* Release a server TID.
b8ff05a9
DM
1180 */
1181void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1182{
470c60c4
KS
1183 /* Is it a server filter TID? */
1184 if (t->nsftids && (stid >= t->sftid_base)) {
1185 stid -= t->sftid_base;
1186 stid += t->nstids;
1187 } else {
1188 stid -= t->stid_base;
1189 }
1190
b8ff05a9
DM
1191 spin_lock_bh(&t->stid_lock);
1192 if (family == PF_INET)
1193 __clear_bit(stid, t->stid_bmap);
1194 else
a99c683e 1195 bitmap_release_region(t->stid_bmap, stid, 1);
b8ff05a9 1196 t->stid_tab[stid].data = NULL;
2248b293 1197 if (stid < t->nstids) {
1dec4cec 1198 if (family == PF_INET6) {
a99c683e 1199 t->stids_in_use -= 2;
1dec4cec
GG
1200 t->v6_stids_in_use -= 2;
1201 } else {
1202 t->stids_in_use--;
1203 }
2248b293
HS
1204 } else {
1205 t->sftids_in_use--;
1206 }
1dec4cec 1207
b8ff05a9
DM
1208 spin_unlock_bh(&t->stid_lock);
1209}
1210EXPORT_SYMBOL(cxgb4_free_stid);
1211
1212/*
1213 * Populate a TID_RELEASE WR. Caller must properly size the skb.
1214 */
1215static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1216 unsigned int tid)
1217{
1218 struct cpl_tid_release *req;
1219
1220 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
4df864c1 1221 req = __skb_put(skb, sizeof(*req));
b8ff05a9
DM
1222 INIT_TP_WR(req, tid);
1223 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1224}
1225
1226/*
1227 * Queue a TID release request and if necessary schedule a work queue to
1228 * process it.
1229 */
31b9c19b 1230static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1231 unsigned int tid)
b8ff05a9
DM
1232{
1233 void **p = &t->tid_tab[tid];
1234 struct adapter *adap = container_of(t, struct adapter, tids);
1235
1236 spin_lock_bh(&adap->tid_release_lock);
1237 *p = adap->tid_release_head;
1238 /* Low 2 bits encode the Tx channel number */
1239 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1240 if (!adap->tid_release_task_busy) {
1241 adap->tid_release_task_busy = true;
29aaee65 1242 queue_work(adap->workq, &adap->tid_release_task);
b8ff05a9
DM
1243 }
1244 spin_unlock_bh(&adap->tid_release_lock);
1245}
b8ff05a9
DM
1246
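/*
 * Editorial note on the pointer tagging above (not from the original
 * source): tid_tab entries are pointer-sized and pointer-aligned, so the
 * low 2 bits of &t->tid_tab[tid] are always zero and can carry the Tx
 * channel number (0-3).  E.g. tagging slot address 0x...e38 with chan = 3
 * stores 0x...e3b; process_tid_release_list() below recovers them with
 * "chan = (uintptr_t)p & 3" and "p = (void *)p - chan".
 */
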
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

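/*
 * Illustrative use (editorial sketch; port and queue values are
 * hypothetical): an upper-layer driver typically allocates a server TID
 * and then binds a listener, steering incoming SYNs to a chosen ingress
 * queue:
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, ctx);
 *	if (stid >= 0)
 *		ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
 *					  htons(8000), 0, rxq_id);
 *
 * where rxq_id identifies the ingress queue that should receive the
 * pass-open replies (for ULDs, one of the queue ids exported via
 * struct cxgb4_lld_info).
 */
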
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);

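/*
 * Worked example (editorial, table values hypothetical): with an MTU
 * table starting {88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, ...},
 * cxgb4_best_mtu(mtus, 1400, &idx) returns 1280 with idx == 6, since
 * 1488 would exceed the 1400 target; any target below 88 returns the
 * smallest entry, 88, with idx == 0.
 */
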
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);

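/*
 * Worked example (editorial, values hypothetical): with neighboring
 * table entries 1488 and 1500, header_size = 40 (IPv4 + TCP) and
 * data_size_align = 8, an MTU of 1500 yields a 1460-byte data segment
 * (not 8-byte aligned) while 1488 yields 1448 (aligned).  For
 * data_size_max = 1460 the closest entry is 1500, but since the aligned
 * entry 1488 is only one index away, 1488 is returned instead.
 */
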
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);

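/*
 * Example (editorial): for viid 0x85 the low 7 bits are 0x05, so T4/T5
 * map the VI to SMT index (0x05 << 1) = 10, i.e. the first entry of
 * two-entry row 5, while T6 maps it directly to row 5.
 */
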
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

1707int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
1708 u16 size)
1709{
1710 struct adapter *adap = netdev2adap(dev);
1711 u16 hw_pidx, hw_cidx;
1712 int ret;
1713
1714 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
1715 if (ret)
1716 goto out;
1717
1718 if (pidx != hw_pidx) {
1719 u16 delta;
f612b815 1720 u32 val;
3069ee9b
VP
1721
1722 if (pidx >= hw_pidx)
1723 delta = pidx - hw_pidx;
1724 else
1725 delta = size - hw_pidx + pidx;
f612b815
HS
1726
1727 if (is_t4(adap->params.chip))
1728 val = PIDX_V(delta);
1729 else
1730 val = PIDX_T5_V(delta);
3069ee9b 1731 wmb();
f612b815
HS
1732 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1733 QID_V(qid) | val);
3069ee9b
VP
1734 }
1735out:
1736 return ret;
1737}
1738EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
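
/*
 * Worked example of the producer-index delta above (illustrative values):
 * with a ring of size = 1024, hw_pidx = 1020 and a software pidx = 4, the
 * software index has wrapped, so
 *
 *      delta = size - hw_pidx + pidx = 1024 - 1020 + 4 = 8
 *
 * descriptors must be added to the hardware producer index.
 */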

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
        u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
        u32 edc0_end, edc1_end, mc0_end, mc1_end;
        u32 offset, memtype, memaddr;
        struct adapter *adap;
        u32 hma_size = 0;
        int ret;

        adap = netdev2adap(dev);

        offset = ((stag >> 8) * 32) + adap->vres.stag.start;

        /* Figure out where the offset lands in the Memory Type/Address scheme.
         * This code assumes that the memory is laid out starting at offset 0
         * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
         * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
         * MC0, and some have both MC0 and MC1.
         */
        size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
        edc0_size = EDRAM0_SIZE_G(size) << 20;
        size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
        edc1_size = EDRAM1_SIZE_G(size) << 20;
        size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
        mc0_size = EXT_MEM0_SIZE_G(size) << 20;

        if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
                size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
                hma_size = EXT_MEM1_SIZE_G(size) << 20;
        }
        edc0_end = edc0_size;
        edc1_end = edc0_end + edc1_size;
        mc0_end = edc1_end + mc0_size;

        if (offset < edc0_end) {
                memtype = MEM_EDC0;
                memaddr = offset;
        } else if (offset < edc1_end) {
                memtype = MEM_EDC1;
                memaddr = offset - edc0_end;
        } else {
                if (hma_size && (offset < (edc1_end + hma_size))) {
                        memtype = MEM_HMA;
                        memaddr = offset - edc1_end;
                } else if (offset < mc0_end) {
                        memtype = MEM_MC0;
                        memaddr = offset - edc1_end;
                } else if (is_t5(adap->params.chip)) {
                        size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
                        mc1_size = EXT_MEM1_SIZE_G(size) << 20;
                        mc1_end = mc0_end + mc1_size;
                        if (offset < mc1_end) {
                                memtype = MEM_MC1;
                                memaddr = offset - mc0_end;
                        } else {
                                /* offset beyond the end of any memory */
                                goto err;
                        }
                } else {
                        /* T4/T6 only has a single memory channel */
                        goto err;
                }
        }

        spin_lock(&adap->win0_lock);
        ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
        spin_unlock(&adap->win0_lock);
        return ret;

err:
        dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
                stag, offset);
        return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
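
/*
 * Worked example of the address-range walk above (illustrative sizes): on a
 * card with 256MB EDC0, 256MB EDC1 and 2GB MC0 (no HMA), edc1_end = 512MB
 * and mc0_end = 2560MB.  An offset of 528MB is past edc1_end but below
 * mc0_end, so it resolves to memtype = MEM_MC0 with
 * memaddr = 528MB - 512MB = 16MB into MC0.
 */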

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
        u32 hi, lo;
        struct adapter *adap;

        adap = netdev2adap(dev);
        lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
        hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

        return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
                         unsigned int qid,
                         enum cxgb4_bar2_qtype qtype,
                         int user,
                         u64 *pbar2_qoffset,
                         unsigned int *pbar2_qid)
{
        return t4_bar2_sge_qregs(netdev2adap(dev),
                                 qid,
                                 (qtype == CXGB4_BAR2_QTYPE_EGRESS
                                  ? T4_BAR2_QTYPE_EGRESS
                                  : T4_BAR2_QTYPE_INGRESS),
                                 user,
                                 pbar2_qoffset,
                                 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
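
/*
 * Example (illustrative only): an upper-layer driver can translate an
 * egress queue id into a BAR2 doorbell much like process_db_drop() below
 * does internally.  The helper name and the pidx increment are hypothetical.
 */
#if 0
static int my_uld_kick_eq(struct net_device *dev, unsigned int qid, u16 inc)
{
        struct adapter *adap = netdev2adap(dev);
        u64 bar2_qoffset;
        unsigned int bar2_qid;
        int ret;

        ret = cxgb4_bar2_sge_qregs(dev, qid, CXGB4_BAR2_QTYPE_EGRESS, 0,
                                   &bar2_qoffset, &bar2_qid);
        if (ret)
                return ret;

        /* T5+ style doorbell write through the BAR2 user doorbell region */
        writel(PIDX_T5_V(inc) | QID_V(bar2_qid),
               adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
        return 0;
}
#endif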

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
        const struct device *parent;
        const struct net_device *netdev = neigh->dev;

        if (is_vlan_dev(netdev))
                netdev = vlan_dev_real_dev(netdev);
        parent = netdev->dev.parent;
        if (parent && parent->driver == &cxgb4_driver.driver)
                t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
                       void *data)
{
        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                check_neigh_update(data);
                break;
        case NETEVENT_REDIRECT:
        default:
                break;
        }
        return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
        .notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
        u32 v1, v2, lp_count, hp_count;

        do {
                v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
                v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
                if (is_t4(adap->params.chip)) {
                        lp_count = LP_COUNT_G(v1);
                        hp_count = HP_COUNT_G(v1);
                } else {
                        lp_count = LP_COUNT_T5_G(v1);
                        hp_count = HP_COUNT_T5_G(v2);
                }

                if (lp_count == 0 && hp_count == 0)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(usecs_to_jiffies(usecs));
        } while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->db_lock, flags);
        q->db_disabled = 1;
        spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
        spin_lock_irq(&q->db_lock);
        if (q->db_pidx_inc) {
                /* Make sure that all writes to the TX descriptors
                 * are committed before we tell HW about them.
                 */
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                             QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
                q->db_pidx_inc = 0;
        }
        q->db_disabled = 0;
        spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                disable_txq_db(&adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];

                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                disable_txq_db(&txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                enable_txq_db(adap, &adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];

                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                enable_txq_db(adap, &txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
        enum cxgb4_uld type = CXGB4_ULD_RDMA;

        if (adap->uld && adap->uld[type].handle)
                adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
        struct adapter *adap;

        adap = container_of(work, struct adapter, db_full_task);

        drain_db_fifo(adap, dbfifo_drain_delay);
        enable_dbs(adap);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
        else
                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
                                 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
        u16 hw_pidx, hw_cidx;
        int ret;

        spin_lock_irq(&q->db_lock);
        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
        if (ret)
                goto out;
        if (q->db_pidx != hw_pidx) {
                u16 delta;
                u32 val;

                if (q->db_pidx >= hw_pidx)
                        delta = q->db_pidx - hw_pidx;
                else
                        delta = q->size - hw_pidx + q->db_pidx;

                if (is_t4(adap->params.chip))
                        val = PIDX_V(delta);
                else
                        val = PIDX_T5_V(delta);
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                             QID_V(q->cntxt_id) | val);
        }
out:
        q->db_disabled = 0;
        q->db_pidx_inc = 0;
        spin_unlock_irq(&q->db_lock);
        if (ret)
                CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
        if (is_offload(adap)) {
                struct sge_uld_txq_info *txq_info =
                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];
                if (txq_info) {
                        for_each_ofldtxq(&adap->sge, i) {
                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                                sync_txq_pidx(adap, &txq->q);
                        }
                }
        }
        for_each_port(adap, i)
                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void process_db_drop(struct work_struct *work)
{
        struct adapter *adap;

        adap = container_of(work, struct adapter, db_drop_task);

        if (is_t4(adap->params.chip)) {
                drain_db_fifo(adap, dbfifo_drain_delay);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
                drain_db_fifo(adap, dbfifo_drain_delay);
                recover_all_queues(adap);
                drain_db_fifo(adap, dbfifo_drain_delay);
                enable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
        } else if (is_t5(adap->params.chip)) {
                u32 dropped_db = t4_read_reg(adap, 0x010ac);
                u16 qid = (dropped_db >> 15) & 0x1ffff;
                u16 pidx_inc = dropped_db & 0x1fff;
                u64 bar2_qoffset;
                unsigned int bar2_qid;
                int ret;

                ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
                                        0, &bar2_qoffset, &bar2_qid);
                if (ret)
                        dev_err(adap->pdev_dev, "doorbell drop recovery: "
                                "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
                else
                        writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
                               adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

                /* Re-enable BAR2 WC */
                t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
        }

        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
                t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}

void t4_db_full(struct adapter *adap)
{
        if (is_t4(adap->params.chip)) {
                disable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
                queue_work(adap->workq, &adap->db_full_task);
        }
}

void t4_db_dropped(struct adapter *adap)
{
        if (is_t4(adap->params.chip)) {
                disable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
        }
        queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
        if (!netevent_registered) {
                register_netevent_notifier(&cxgb4_netevent_nb);
                netevent_registered = true;
        }
}

static void detach_ulds(struct adapter *adap)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        list_del(&adap->list_node);

        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (adap->uld && adap->uld[i].handle)
                        adap->uld[i].state_change(adap->uld[i].handle,
                                                  CXGB4_STATE_DETACH);

        if (netevent_registered && list_empty(&adapter_list)) {
                unregister_netevent_notifier(&cxgb4_netevent_nb);
                netevent_registered = false;
        }
        mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (adap->uld && adap->uld[i].handle)
                        adap->uld[i].state_change(adap->uld[i].handle,
                                                  new_state);
        mutex_unlock(&uld_mutex);
}

#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
                                   unsigned long event, void *data)
{
        struct inet6_ifaddr *ifa = data;
        struct net_device *event_dev = ifa->idev->dev;
        const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
        struct adapter *adap;
#endif
        if (is_vlan_dev(event_dev))
                event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
        if (event_dev->flags & IFF_MASTER) {
                list_for_each_entry(adap, &adapter_list, list_node) {
                        switch (event) {
                        case NETDEV_UP:
                                cxgb4_clip_get(adap->port[0],
                                               (const u32 *)ifa, 1);
                                break;
                        case NETDEV_DOWN:
                                cxgb4_clip_release(adap->port[0],
                                                   (const u32 *)ifa, 1);
                                break;
                        default:
                                break;
                        }
                }
                return NOTIFY_OK;
        }
#endif

        if (event_dev)
                parent = event_dev->dev.parent;

        if (parent && parent->driver == &cxgb4_driver.driver) {
                switch (event) {
                case NETDEV_UP:
                        cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
                        break;
                case NETDEV_DOWN:
                        cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
                        break;
                default:
                        break;
                }
        }
        return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
        .notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
        int i;
        struct net_device *dev;
        int ret;

        rcu_read_lock();

        for (i = 0; i < MAX_NPORTS; i++) {
                dev = adap->port[i];
                ret = 0;

                if (dev)
                        ret = cxgb4_update_root_dev_clip(dev);

                if (ret < 0)
                        break;
        }
        rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        mutex_lock(&uld_mutex);
        err = setup_sge_queues(adap);
        if (err)
                goto rel_lock;
        err = setup_rss(adap);
        if (err)
                goto freeq;

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;
                err = request_msix_queue_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else {
                err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
                                  adap->port[0]->name, adap);
                if (err)
                        goto irq_err;
        }

        enable_rx(adap);
        t4_sge_start(adap);
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;
        mutex_unlock(&uld_mutex);

        notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
        update_clip(adap);
#endif
        /* Initialize hash mac addr list */
        INIT_LIST_HEAD(&adap->mac_hlist);
        return err;

 irq_err:
        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
        t4_free_sge_resources(adap);
 rel_lock:
        mutex_unlock(&uld_mutex);
        return err;
}

static void cxgb_down(struct adapter *adapter)
{
        cancel_work_sync(&adapter->tid_release_task);
        cancel_work_sync(&adapter->db_full_task);
        cancel_work_sync(&adapter->db_drop_task);
        adapter->tid_release_task_busy = false;
        adapter->tid_release_head = NULL;

        t4_sge_stop(adapter);
        t4_free_sge_resources(adapter);
        adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        netif_carrier_off(dev);

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = cxgb_up(adapter);
                if (err < 0)
                        return err;
        }

        /* It's possible that the basic port information could have
         * changed since we first read it.
         */
        err = t4_update_port_info(pi);
        if (err < 0)
                return err;

        err = link_start(dev);
        if (!err)
                netif_tx_start_all_queues(dev);
        return err;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;

        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);
        ret = t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
        cxgb4_dcb_reset(dev);
        dcb_tx_queue_prio_enable(dev, false);
#endif
        return ret;
}

int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                               __be32 sip, __be16 sport, __be16 vlan,
                               unsigned int queue, unsigned char port,
                               unsigned char mask)
{
        int ret;
        struct filter_entry *f;
        struct adapter *adap;
        int i;
        u8 *val;

        adap = netdev2adap(dev);

        /* Adjust stid to correct filter index */
        stid -= adap->tids.sftid_base;
        stid += adap->tids.nftids;

        /* Check to make sure the filter requested is writable ...
         */
        f = &adap->tids.ftid_tab[stid];
        ret = writable_filter(f);
        if (ret)
                return ret;

        /* Clear out any old resources being used by the filter before
         * we start constructing the new filter.
         */
        if (f->valid)
                clear_filter(adap, f);

        /* Clear out filter specifications */
        memset(&f->fs, 0, sizeof(struct ch_filter_specification));
        f->fs.val.lport = cpu_to_be16(sport);
        f->fs.mask.lport = ~0;
        val = (u8 *)&sip;
        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
                for (i = 0; i < 4; i++) {
                        f->fs.val.lip[i] = val[i];
                        f->fs.mask.lip[i] = ~0;
                }
                if (adap->params.tp.vlan_pri_map & PORT_F) {
                        f->fs.val.iport = port;
                        f->fs.mask.iport = mask;
                }
        }

        if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
                f->fs.val.proto = IPPROTO_TCP;
                f->fs.mask.proto = ~0;
        }

        f->fs.dirsteer = 1;
        f->fs.iq = queue;
        /* Mark filter as locked */
        f->locked = 1;
        f->fs.rpttid = 1;

        /* Save the actual tid. We need this to get the corresponding
         * filter entry structure in filter_rpl.
         */
        f->tid = stid + adap->tids.ftid_base;
        ret = set_filter_wr(adap, stid);
        if (ret) {
                clear_filter(adap, f);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
                               unsigned int queue, bool ipv6)
{
        struct filter_entry *f;
        struct adapter *adap;

        adap = netdev2adap(dev);

        /* Adjust stid to correct filter index */
        stid -= adap->tids.sftid_base;
        stid += adap->tids.nftids;

        f = &adap->tids.ftid_tab[stid];
        /* Unlock the filter */
        f->locked = 0;

        return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
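
/*
 * Example (illustrative only): how a hypothetical offload driver might steer
 * traffic for a listening server at 192.168.0.1:8000 to RX queue 0.  The
 * stid is assumed to have been reserved beforehand through the driver's
 * server-TID allocator; the helper name and addresses are invented.
 */
#if 0
static int my_uld_steer_listener(struct net_device *dev, unsigned int stid)
{
        __be32 sip = htonl(0xc0a80001);         /* 192.168.0.1 */
        __be16 sport = htons(8000);

        /* vlan = 0, queue = 0, no ingress-port match */
        return cxgb4_create_server_filter(dev, stid, sip, sport, 0, 0, 0, 0);
}
#endif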

static void cxgb_get_stats(struct net_device *dev,
                           struct rtnl_link_stats64 *ns)
{
        struct port_stats stats;
        struct port_info *p = netdev_priv(dev);
        struct adapter *adapter = p->adapter;

        /* Block retrieving statistics during EEH error
         * recovery. Otherwise, the recovery might fail
         * and the PCI device will be removed permanently
         */
        spin_lock(&adapter->stats_lock);
        if (!netif_device_present(dev)) {
                spin_unlock(&adapter->stats_lock);
                return;
        }
        t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
                                 &p->stats_base);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes   = stats.tx_octets;
        ns->tx_packets = stats.tx_frames;
        ns->rx_bytes   = stats.rx_octets;
        ns->rx_packets = stats.rx_frames;
        ns->multicast  = stats.rx_mcast_frames;

        /* detailed rx_errors */
        ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
                               stats.rx_runt;
        ns->rx_over_errors   = 0;
        ns->rx_crc_errors    = stats.rx_fcs_err;
        ns->rx_frame_errors  = stats.rx_symbol_err;
        ns->rx_dropped       = stats.rx_ovflow0 + stats.rx_ovflow1 +
                               stats.rx_ovflow2 + stats.rx_ovflow3 +
                               stats.rx_trunc0 + stats.rx_trunc1 +
                               stats.rx_trunc2 + stats.rx_trunc3;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors   = 0;
        ns->tx_carrier_errors   = 0;
        ns->tx_fifo_errors      = 0;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors    = 0;

        ns->tx_errors = stats.tx_error_frames;
        ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
                ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        unsigned int mbox;
        int ret = 0, prtad, devad;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

        switch (cmd) {
        case SIOCGMIIPHY:
                if (pi->mdio_addr < 0)
                        return -EOPNOTSUPP;
                data->phy_id = pi->mdio_addr;
                break;
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                if (mdio_phy_id_is_c45(data->phy_id)) {
                        prtad = mdio_phy_id_prtad(data->phy_id);
                        devad = mdio_phy_id_devad(data->phy_id);
                } else if (data->phy_id < 32) {
                        prtad = data->phy_id;
                        devad = 0;
                        data->reg_num &= 0x1f;
                } else
                        return -EINVAL;

                mbox = pi->adapter->pf;
                if (cmd == SIOCGMIIREG)
                        ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
                                         data->reg_num, &data->val_out);
                else
                        ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
                                         data->reg_num, data->val_in);
                break;
        case SIOCGHWTSTAMP:
                return copy_to_user(req->ifr_data, &pi->tstamp_config,
                                    sizeof(pi->tstamp_config)) ?
                        -EFAULT : 0;
        case SIOCSHWTSTAMP:
                if (copy_from_user(&pi->tstamp_config, req->ifr_data,
                                   sizeof(pi->tstamp_config)))
                        return -EFAULT;

                if (!is_t4(adapter->params.chip)) {
                        switch (pi->tstamp_config.tx_type) {
                        case HWTSTAMP_TX_OFF:
                        case HWTSTAMP_TX_ON:
                                break;
                        default:
                                return -ERANGE;
                        }

                        switch (pi->tstamp_config.rx_filter) {
                        case HWTSTAMP_FILTER_NONE:
                                pi->rxtstamp = false;
                                break;
                        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                                cxgb4_ptprx_timestamping(pi, pi->port_id,
                                                         PTP_TS_L4);
                                break;
                        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                                cxgb4_ptprx_timestamping(pi, pi->port_id,
                                                         PTP_TS_L2_L4);
                                break;
                        case HWTSTAMP_FILTER_ALL:
                        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                                pi->rxtstamp = true;
                                break;
                        default:
                                pi->tstamp_config.rx_filter =
                                        HWTSTAMP_FILTER_NONE;
                                return -ERANGE;
                        }

                        if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
                            (pi->tstamp_config.rx_filter ==
                             HWTSTAMP_FILTER_NONE)) {
                                if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
                                        pi->ptp_enable = false;
                        }

                        if (pi->tstamp_config.rx_filter !=
                            HWTSTAMP_FILTER_NONE) {
                                if (cxgb4_ptp_redirect_rx_packet(adapter,
                                                                 pi) >= 0)
                                        pi->ptp_enable = true;
                        }
                } else {
                        /* For T4 Adapters */
                        switch (pi->tstamp_config.rx_filter) {
                        case HWTSTAMP_FILTER_NONE:
                                pi->rxtstamp = false;
                                break;
                        case HWTSTAMP_FILTER_ALL:
                                pi->rxtstamp = true;
                                break;
                        default:
                                pi->tstamp_config.rx_filter =
                                        HWTSTAMP_FILTER_NONE;
                                return -ERANGE;
                        }
                }
                return copy_to_user(req->ifr_data, &pi->tstamp_config,
                                    sizeof(pi->tstamp_config)) ?
                        -EFAULT : 0;
        default:
                return -EOPNOTSUPP;
        }
        return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        /* unfortunately we can't return errors to the stack */
        set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
                            -1, -1, -1, true);
        if (!ret)
                dev->mtu = new_mtu;
        return ret;
}

#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{
        /* Turn carrier off since we don't have to transmit anything on this
         * interface.
         */
        netif_carrier_off(dev);
        return 0;
}

/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
        u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
        unsigned int i, vf, nvfs;
        u16 a, b;
        int err;
        u8 *na;

        adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
                                                            PCI_CAP_ID_VPD);
        err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
        if (err)
                return;

        na = adap->params.vpd.na;
        for (i = 0; i < ETH_ALEN; i++)
                hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
                              hex2val(na[2 * i + 1]));

        a = (hw_addr[0] << 8) | hw_addr[1];
        b = (hw_addr[1] << 8) | hw_addr[2];
        a ^= b;
        a |= 0x0200;    /* locally assigned Ethernet MAC address */
        a &= ~0x0100;   /* not a multicast Ethernet MAC address */
        macaddr[0] = a >> 8;
        macaddr[1] = a & 0xff;

        for (i = 2; i < 5; i++)
                macaddr[i] = hw_addr[i + 1];

        for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
             vf < nvfs; vf++) {
                macaddr[5] = adap->pf * 16 + vf;
                ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
        }
}
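
/*
 * Worked example of the derivation above (illustrative values): with a VPD
 * base MAC of 00:07:43:12:34:56, a = 0x0007 and b = 0x0743, so a ^= b gives
 * 0x0744; the locally-administered bit (0x0200) is already set there, and
 * clearing the multicast bit (0x0100) leaves 0x0644.  The VFs are therefore
 * assigned MACs of the form 06:44:12:34:56:xx, where xx = 16 * pf + vf.
 */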

static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int ret;

        /* verify MAC addr is valid */
        if (!is_valid_ether_addr(mac)) {
                dev_err(pi->adapter->pdev_dev,
                        "Invalid Ethernet address %pM for VF %d\n",
                        mac, vf);
                return -EINVAL;
        }

        dev_info(pi->adapter->pdev_dev,
                 "Setting MAC %pM on VF %d\n", mac, vf);
        ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
        if (!ret)
                ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
        return ret;
}

static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
                                    int vf, struct ifla_vf_info *ivi)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct vf_info *vfinfo;

        if (vf >= adap->num_vfs)
                return -EINVAL;
        vfinfo = &adap->vfinfo[vf];

        ivi->vf = vf;
        ivi->max_tx_rate = vfinfo->tx_rate;
        ivi->min_tx_rate = 0;
        ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
        ivi->vlan = vfinfo->vlan;
        return 0;
}

static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
                                       struct netdev_phys_item_id *ppid)
{
        struct port_info *pi = netdev_priv(dev);
        unsigned int phy_port_id;

        phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
        ppid->id_len = sizeof(phy_port_id);
        memcpy(ppid->id, &phy_port_id, ppid->id_len);
        return 0;
}

static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
                                  int min_tx_rate, int max_tx_rate)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        unsigned int link_ok, speed, mtu;
        u32 fw_pfvf, fw_class;
        int class_id = vf;
        int ret;
        u16 pktsize;

        if (vf >= adap->num_vfs)
                return -EINVAL;

        if (min_tx_rate) {
                dev_err(adap->pdev_dev,
                        "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
                        min_tx_rate, vf);
                return -EINVAL;
        }

        ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
        if (ret != FW_SUCCESS) {
                dev_err(adap->pdev_dev,
                        "Failed to get link information for VF %d\n", vf);
                return -EINVAL;
        }

        if (!link_ok) {
                dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
                return -EINVAL;
        }

        if (max_tx_rate > speed) {
                dev_err(adap->pdev_dev,
                        "Max tx rate %d for VF %d can't be > link-speed %u",
                        max_tx_rate, vf, speed);
                return -EINVAL;
        }

        pktsize = mtu;
        /* subtract ethhdr size and 4 bytes crc since, f/w appends it */
        pktsize = pktsize - sizeof(struct ethhdr) - 4;
        /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
        pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
        /* configure Traffic Class for rate-limiting */
        ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
                              SCHED_CLASS_LEVEL_CL_RL,
                              SCHED_CLASS_MODE_CLASS,
                              SCHED_CLASS_RATEUNIT_BITS,
                              SCHED_CLASS_RATEMODE_ABS,
                              pi->tx_chan, class_id, 0,
                              max_tx_rate * 1000, 0, pktsize);
        if (ret) {
                dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
                        ret);
                return -EINVAL;
        }
        dev_info(adap->pdev_dev,
                 "Class %d with MSS %u configured with rate %u\n",
                 class_id, pktsize, max_tx_rate);

        /* bind VF to configured Traffic Class */
        fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
                   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
        fw_class = class_id;
        ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
                            &fw_class);
        if (ret) {
                dev_err(adap->pdev_dev,
                        "Err %d in binding VF %d to Traffic Class %d\n",
                        ret, vf, class_id);
                return -EINVAL;
        }
        dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
                 adap->pf, vf, class_id);
        adap->vfinfo[vf].tx_rate = max_tx_rate;
        return 0;
}
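
/*
 * Worked example of the MSS computation above (illustrative): for a
 * 1500-byte MTU, pktsize = 1500 - 14 (ethhdr) - 4 (CRC) - 20 (iphdr)
 * - 20 (tcphdr) = 1442 bytes, which is the packet size handed to
 * t4_sched_params() for the rate-limiting class.
 */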

static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
                                  u16 vlan, u8 qos, __be16 vlan_proto)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int ret;

        if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
                return -EPROTONOSUPPORT;

        ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
        if (!ret) {
                adap->vfinfo[vf].vlan = vlan;
                return 0;
        }

        dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
                ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
        return ret;
}
#endif /* CONFIG_PCI_IOV */

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
        int ret;
        struct sockaddr *addr = p;
        struct port_info *pi = netdev_priv(dev);

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
                            pi->xact_addr_filt, addr->sa_data, true, true);
        if (ret < 0)
                return ret;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        pi->xact_addr_filt = ret;
        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & USING_MSIX) {
                int i;
                struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

                for (i = pi->nqsets; i; i--, rx++)
                        t4_sge_intr_msix(0, &rx->rspq);
        } else
                t4_intr_handler(adap)(0, adap);
}
#endif

static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sched_class *e;
        struct ch_sched_params p;
        struct ch_sched_queue qe;
        u32 req_rate;
        int err = 0;

        if (!can_sched(dev))
                return -ENOTSUPP;

        if (index < 0 || index > pi->nqsets - 1)
                return -EINVAL;

        if (!(adap->flags & FULL_INIT_DONE)) {
                dev_err(adap->pdev_dev,
                        "Failed to rate limit on queue %d. Link Down?\n",
                        index);
                return -EINVAL;
        }

        /* Convert from Mbps to Kbps */
        req_rate = rate << 10;

        /* Max rate is 100 Gbps */
        if (req_rate >= SCHED_MAX_RATE_KBPS) {
                dev_err(adap->pdev_dev,
                        "Invalid rate %u Mbps, Max rate is %u Mbps\n",
                        rate, SCHED_MAX_RATE_KBPS >> 10);
                return -ERANGE;
        }

        /* First unbind the queue from any existing class */
        memset(&qe, 0, sizeof(qe));
        qe.queue = index;
        qe.class = SCHED_CLS_NONE;

        err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
        if (err) {
                dev_err(adap->pdev_dev,
                        "Unbinding Queue %d on port %d fail. Err: %d\n",
                        index, pi->port_id, err);
                return err;
        }

        /* Queue already unbound */
        if (!req_rate)
                return 0;

        /* Fetch any available unused or matching scheduling class */
        memset(&p, 0, sizeof(p));
        p.type = SCHED_CLASS_TYPE_PACKET;
        p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
        p.u.params.mode = SCHED_CLASS_MODE_CLASS;
        p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
        p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
        p.u.params.channel = pi->tx_chan;
        p.u.params.class = SCHED_CLS_NONE;
        p.u.params.minrate = 0;
        p.u.params.maxrate = req_rate;
        p.u.params.weight = 0;
        p.u.params.pktsize = dev->mtu;

        e = cxgb4_sched_class_alloc(dev, &p);
        if (!e)
                return -ENOMEM;

        /* Bind the queue to a scheduling class */
        memset(&qe, 0, sizeof(qe));
        qe.queue = index;
        qe.class = e->idx;

        err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
        if (err)
                dev_err(adap->pdev_dev,
                        "Queue rate limiting failed. Err: %d\n", err);
        return err;
}
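
/*
 * Usage sketch (illustrative): this hook is exercised from user space
 * through the tx_maxrate sysfs attribute of a TX queue, e.g.
 *
 *      echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * which requests a 1000 Mbps cap on queue 0 and arrives here with
 * index = 0 and rate = 1000 (converted to 1024000 Kbps by the << 10 above).
 */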

static int cxgb_setup_tc_flower(struct net_device *dev,
                                struct tc_cls_flower_offload *cls_flower)
{
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return cxgb4_tc_flower_replace(dev, cls_flower);
        case TC_CLSFLOWER_DESTROY:
                return cxgb4_tc_flower_destroy(dev, cls_flower);
        case TC_CLSFLOWER_STATS:
                return cxgb4_tc_flower_stats(dev, cls_flower);
        default:
                return -EOPNOTSUPP;
        }
}

static int cxgb_setup_tc_cls_u32(struct net_device *dev,
                                 struct tc_cls_u32_offload *cls_u32)
{
        switch (cls_u32->command) {
        case TC_CLSU32_NEW_KNODE:
        case TC_CLSU32_REPLACE_KNODE:
                return cxgb4_config_knode(dev, cls_u32);
        case TC_CLSU32_DELETE_KNODE:
                return cxgb4_delete_knode(dev, cls_u32);
        default:
                return -EOPNOTSUPP;
        }
}

static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                  void *cb_priv)
{
        struct net_device *dev = cb_priv;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);

        if (!(adap->flags & FULL_INIT_DONE)) {
                dev_err(adap->pdev_dev,
                        "Failed to setup tc on port %d. Link Down?\n",
                        pi->port_id);
                return -EINVAL;
        }

        if (!tc_cls_can_offload_and_chain0(dev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSU32:
                return cxgb_setup_tc_cls_u32(dev, type_data);
        case TC_SETUP_CLSFLOWER:
                return cxgb_setup_tc_flower(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int cxgb_setup_tc_block(struct net_device *dev,
                               struct tc_block_offload *f)
{
        struct port_info *pi = netdev2pinfo(dev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
                                             pi, dev);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
                         void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return cxgb_setup_tc_block(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}
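
/*
 * Usage sketch (illustrative): with this ndo_setup_tc implementation, a u32
 * or flower rule added on the clsact ingress block is offloaded to the
 * adapter, e.g.
 *
 *      tc qdisc add dev eth0 clsact
 *      tc filter add dev eth0 ingress protocol ip flower \
 *              dst_ip 192.168.0.2 action drop
 */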

static void cxgb_del_udp_tunnel(struct net_device *netdev,
                                struct udp_tunnel_info *ti)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adapter = pi->adapter;
        unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
        int ret = 0, i;

        if (chip_ver < CHELSIO_T6)
                return;

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (!adapter->vxlan_port_cnt ||
                    adapter->vxlan_port != ti->port)
                        return; /* Invalid VxLAN destination port */

                adapter->vxlan_port_cnt--;
                if (adapter->vxlan_port_cnt)
                        return;

                adapter->vxlan_port = 0;
                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (!adapter->geneve_port_cnt ||
                    adapter->geneve_port != ti->port)
                        return; /* Invalid GENEVE destination port */

                adapter->geneve_port_cnt--;
                if (adapter->geneve_port_cnt)
                        return;

                adapter->geneve_port = 0;
                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
                break;
        default:
                return;
        }

        /* Matchall mac entries can be deleted only after all tunnel ports
         * are brought down or removed.
         */
        if (!adapter->rawf_cnt)
                return;
        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);
                ret = t4_free_raw_mac_filt(adapter, pi->viid,
                                           match_all_mac, match_all_mac,
                                           adapter->rawf_start +
                                           pi->port_id,
                                           1, pi->port_id, true);
                if (ret < 0) {
                        netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
                                    i);
                        return;
                }
                atomic_dec(&adapter->mps_encap[adapter->rawf_start +
                           pi->port_id].refcnt);
        }
}

static void cxgb_add_udp_tunnel(struct net_device *netdev,
                                struct udp_tunnel_info *ti)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adapter = pi->adapter;
        unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
        int i, ret;

        if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
                return;

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                /* Callback for adding vxlan port can be called with the same
                 * port for both IPv4 and IPv6. We should not disable the
                 * offloading when the same port for both protocols is added
                 * and later one of them is removed.
                 */
                if (adapter->vxlan_port_cnt &&
                    adapter->vxlan_port == ti->port) {
                        adapter->vxlan_port_cnt++;
                        return;
                }

                /* We will support only one VxLAN port */
                if (adapter->vxlan_port_cnt) {
                        netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
                                    be16_to_cpu(adapter->vxlan_port),
                                    be16_to_cpu(ti->port));
                        return;
                }

                adapter->vxlan_port = ti->port;
                adapter->vxlan_port_cnt = 1;

                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
                             VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (adapter->geneve_port_cnt &&
                    adapter->geneve_port == ti->port) {
                        adapter->geneve_port_cnt++;
                        return;
                }

                /* We will support only one GENEVE port */
                if (adapter->geneve_port_cnt) {
                        netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
                                    be16_to_cpu(adapter->geneve_port),
                                    be16_to_cpu(ti->port));
                        return;
                }

                adapter->geneve_port = ti->port;
                adapter->geneve_port_cnt = 1;

                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
                             GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
                break;
        default:
                return;
        }

        /* Create a 'match all' mac filter entry for inner mac,
         * if raw mac interface is supported. Once the linux kernel provides
         * driver entry points for adding/deleting the inner mac addresses,
         * we will remove this 'match all' entry and fallback to adding
         * exact match filters.
         */
        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);

                ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
                                            match_all_mac,
                                            match_all_mac,
                                            adapter->rawf_start +
                                            pi->port_id,
                                            1, pi->port_id, true);
                if (ret < 0) {
                        netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
                                    be16_to_cpu(ti->port));
                        cxgb_del_udp_tunnel(netdev, ti);
                        return;
                }
                atomic_inc(&adapter->mps_encap[ret].refcnt);
        }
}

static netdev_features_t cxgb_features_check(struct sk_buff *skb,
                                             struct net_device *dev,
                                             netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
                return features;

        /* Check if hw supports offload for this packet */
        if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
                return features;

        /* Offload is not supported for this encapsulated packet */
        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
        /* Disable GRO, if RX_CSUM is disabled */
        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_GRO;

        return features;
}

static const struct net_device_ops cxgb4_netdev_ops = {
        .ndo_open             = cxgb_open,
        .ndo_stop             = cxgb_close,
        .ndo_start_xmit       = t4_eth_xmit,
        .ndo_select_queue     = cxgb_select_queue,
        .ndo_get_stats64      = cxgb_get_stats,
        .ndo_set_rx_mode      = cxgb_set_rxmode,
        .ndo_set_mac_address  = cxgb_set_mac_addr,
        .ndo_set_features     = cxgb_set_features,
        .ndo_validate_addr    = eth_validate_addr,
        .ndo_do_ioctl         = cxgb_ioctl,
        .ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
        .ndo_fcoe_enable      = cxgb_fcoe_enable,
        .ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
        .ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
        .ndo_setup_tc         = cxgb_setup_tc,
        .ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
        .ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
        .ndo_features_check   = cxgb_features_check,
        .ndo_fix_features     = cxgb_fix_features,
};

#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
        .ndo_open             = cxgb4_mgmt_open,
        .ndo_set_vf_mac       = cxgb4_mgmt_set_vf_mac,
        .ndo_get_vf_config    = cxgb4_mgmt_get_vf_config,
        .ndo_set_vf_rate      = cxgb4_mgmt_set_vf_rate,
        .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
        .ndo_set_vf_vlan      = cxgb4_mgmt_set_vf_vlan,
};
#endif

static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
                                   struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
        strlcpy(info->version, cxgb4_driver_version,
                sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
        .get_drvinfo = cxgb4_mgmt_get_drvinfo,
};

static void notify_fatal_err(struct work_struct *work)
{
        struct adapter *adap;

        adap = container_of(work, struct adapter, fatal_err_notify_task);
        notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
}

void t4_fatal_err(struct adapter *adap)
{
        int port;

        if (pci_channel_offline(adap->pdev))
                return;

        /* Disable the SGE since ULDs are going to free resources that
         * could be exposed to the adapter. RDMA MWs for example...
         */
        t4_shutdown_adapter(adap);
        for_each_port(adap, port) {
                struct net_device *dev = adap->port[port];

                /* If we get here in very early initialization the network
                 * devices may not have been set up yet.
                 */
                if (!dev)
                        continue;

                netif_tx_stop_all_queues(dev);
                netif_carrier_off(dev);
        }
        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
        queue_work(adap->workq, &adap->fatal_err_notify_task);
}

static void setup_memwin(struct adapter *adap)
{
        u32 nic_win_base = t4_get_util_window(adap);

        t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
        if (adap->vres.ocq.size) {
                u32 start;
                unsigned int sz_kb;

                start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
                start &= PCI_BASE_ADDRESS_MEM_MASK;
                start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
                t4_write_reg(adap,
                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
                             start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
                t4_write_reg(adap,
                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
                             adap->vres.ocq.start);
                t4_read_reg(adap,
                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
        }
}

/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD     5

#define HMA_PAGE_SIZE           PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS   (16 << 10)  /* FW supports 16K addresses */

#define HMA_PAGE_ORDER                                  \
        ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?      \
        ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE      1
#define HMA_MAX_TOTAL_SIZE                              \
        (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *           \
          HMA_MAX_NO_FW_ADDRESS) >> 20)

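/*
 * Worked example of the bounds above (illustrative, for 4KB pages):
 * HMA_PAGE_ORDER = ilog2(16384 / 4096) = 2, so each FW address covers a
 * 16KB chunk, and HMA_MAX_TOTAL_SIZE = (16KB * 16384) >> 20 = 256MB.
 */
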
static void adap_free_hma_mem(struct adapter *adapter)
{
        struct scatterlist *iter;
        struct page *page;
        int i;

        if (!adapter->hma.sgt)
                return;

        if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
                dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
                             adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
                adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
        }

        for_each_sg(adapter->hma.sgt->sgl, iter,
                    adapter->hma.sgt->orig_nents, i) {
                page = sg_page(iter);
                if (page)
                        __free_pages(page, HMA_PAGE_ORDER);
        }

        kfree(adapter->hma.phy_addr);
        sg_free_table(adapter->hma.sgt);
        kfree(adapter->hma.sgt);
        adapter->hma.sgt = NULL;
}

static int adap_config_hma(struct adapter *adapter)
{
        struct scatterlist *sgl, *iter;
        struct sg_table *sgt;
        struct page *newpage;
        unsigned int i, j, k;
        u32 param, hma_size;
        unsigned int ncmds;
        size_t page_size;
        u32 page_order;
        int node, ret;

        /* HMA is supported only for T6+ cards.
         * Avoid initializing HMA in kdump kernels.
         */
        if (is_kdump_kernel() ||
            CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
                return 0;

        /* Get the HMA region size required by fw */
        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
        ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
                              1, &param, &hma_size);
        /* An error means card has its own memory or HMA is not supported by
         * the firmware. Return without any errors.
         */
        if (ret || !hma_size)
                return 0;

        if (hma_size < HMA_MIN_TOTAL_SIZE ||
            hma_size > HMA_MAX_TOTAL_SIZE) {
                dev_err(adapter->pdev_dev,
                        "HMA size %uMB beyond bounds(%u-%lu)MB\n",
                        hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
                return -EINVAL;
        }

        page_size = HMA_PAGE_SIZE;
        page_order = HMA_PAGE_ORDER;
        adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
        if (unlikely(!adapter->hma.sgt)) {
                dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
                return -ENOMEM;
        }
        sgt = adapter->hma.sgt;
        /* FW returned value will be in MB's */
        sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
        if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
                dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
                kfree(adapter->hma.sgt);
                adapter->hma.sgt = NULL;
                return -ENOMEM;
        }

        sgl = adapter->hma.sgt->sgl;
        node = dev_to_node(adapter->pdev_dev);
        for_each_sg(sgl, iter, sgt->orig_nents, i) {
                newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL,
                                           page_order);
                if (!newpage) {
                        dev_err(adapter->pdev_dev,
                                "Not enough memory for HMA page allocation\n");
                        ret = -ENOMEM;
                        goto free_hma;
                }
                sg_set_page(iter, newpage, page_size << page_order, 0);
        }

        sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
                                DMA_BIDIRECTIONAL);
        if (!sgt->nents) {
                dev_err(adapter->pdev_dev,
                        "Not enough memory for HMA DMA mapping");
                ret = -ENOMEM;
                goto free_hma;
        }
        adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;

        adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
                                        GFP_KERNEL);
        if (unlikely(!adapter->hma.phy_addr)) {
                ret = -ENOMEM;
                goto free_hma;
        }

        for_each_sg(sgl, iter, sgt->nents, i) {
                newpage = sg_page(iter);
                adapter->hma.phy_addr[i] = sg_dma_address(iter);
        }

        ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
        /* Pass on the addresses to firmware */
        for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
                struct fw_hma_cmd hma_cmd;
                u8 naddr = HMA_MAX_ADDR_IN_CMD;
                u8 soc = 0, eoc = 0;
                u8 hma_mode = 1; /* Presently we support only Page table mode */

                soc = (i == 0) ? 1 : 0;
                eoc = (i == ncmds - 1) ? 1 : 0;

                /* For last cmd, set naddr corresponding to remaining
                 * addresses
                 */
                if (i == ncmds - 1) {
                        naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
                        naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
                }
                memset(&hma_cmd, 0, sizeof(hma_cmd));
                hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
                                       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
                hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));

                hma_cmd.mode_to_pcie_params =
                        htonl(FW_HMA_CMD_MODE_V(hma_mode) |
                              FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));

                /* HMA cmd size specified in MB's */
                hma_cmd.naddr_size =
                        htonl(FW_HMA_CMD_SIZE_V(hma_size) |
                              FW_HMA_CMD_NADDR_V(naddr));

                /* Total Page size specified in units of 4K */
                hma_cmd.addr_size_pkd =
                        htonl(FW_HMA_CMD_ADDR_SIZE_V
                                ((page_size << page_order) >> 12));

                /* Fill the 5 addresses */
                for (j = 0; j < naddr; j++) {
                        hma_cmd.phy_address[j] =
                                cpu_to_be64(adapter->hma.phy_addr[j + k]);
                }
                ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
                                 sizeof(hma_cmd), &hma_cmd);
                if (ret) {
                        dev_err(adapter->pdev_dev,
                                "HMA FW command failed with err %d\n", ret);
                        goto free_hma;
                }
        }

        if (!ret)
                dev_info(adapter->pdev_dev,
                         "Reserved %uMB host memory for HMA\n", hma_size);
        return ret;

free_hma:
        adap_free_hma_mem(adapter);
        return ret;
}

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
        u32 v;
        int ret;

        /* get device capabilities */
        memset(c, 0, sizeof(*c));
        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST_F | FW_CMD_READ_F);
        c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
        ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
        if (ret < 0)
                return ret;

        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
        ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
        if (ret < 0)
                return ret;

        ret = t4_config_glbl_rss(adap, adap->pf,
                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
                                 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
        if (ret < 0)
                return ret;

        ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
                          MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
                          FW_CMD_CAP_PF);
        if (ret < 0)
                return ret;

        t4_sge_init(adap);

        /* tweak some settings */
        t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
        t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
        t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
        v = t4_read_reg(adap, TP_PIO_DATA_A);
        t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

        /* first 4 Tx modulation queues point to consecutive Tx channels */
        adap->params.tp.tx_modq_map = 0xE4;
        t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
                     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

        /* associate each Tx modulation queue with consecutive Tx channels */
        v = 0x84218421;
        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                          &v, 1, TP_TX_SCHED_HDR_A);
        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                          &v, 1, TP_TX_SCHED_FIFO_A);
        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                          &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
        if (is_offload(adap)) {
                t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
                             TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
                t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
                             TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
        }

        /* get basic stuff going */
        return t4_early_init(adap, adap->pf);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}

/* 10Gb/s-BT PHY Support.  chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}
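
/* Worked example for the byte-order helpers above: le24() assembles a
 * little-endian 24-bit value, so bytes {0x34, 0x12, 0x00} decode to
 * 0x001234, while be16() is big-endian, so bytes {0x12, 0x34} decode
 * to 0x1234.
 */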

static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
	{ 0, NULL, NULL },
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}

/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that.  The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to the PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with the "
				 "on-adapter FLASH copy, version %#x\n",
				 cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/* We will proceed even if HMA init fails. */
	ret = adap_config_hma(adapter);
	if (ret)
		dev_err(adapter->pdev_dev,
			"HMA configuration failed with error %d\n", ret);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
		 "Configuration File \"%s\", version %#x, computed checksum "
		 "%#x\n", config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */

	t4_get_version_info(adap);
	ret = t4_check_fw_version(adap);
	/* If firmware is too old (not supported by driver) force an update. */
	if (ret)
		state = DEV_STATE_UNINIT;
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
		if (!card_fw) {
			ret = -ENOMEM;
			goto bye;
		}

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		kvfree(card_fw);

		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = t4_get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* If the firmware is initialized already, emit a simple note to that
	 * effect.  Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		ret = adap_config_hma(adap);
		if (ret)
			dev_err(adap->pdev_dev,
				"HMA configuration failed with error %d\n",
				ret);
		dev_info(adap->pdev_dev, "Coming up as %s: "
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)
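
	/* For example, FW_PARAM_PFVF(L2T_START) builds the 32-bit mnemonic
	 * selecting the per-PF/VF "L2T_START" parameter for the
	 * t4_query_params() calls below; Y and Z are zero for these scalar
	 * parameters.
	 */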
	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info.  Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps,
	 * i.e. starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif

	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* We don't yet have a PARAMs call to retrieve the number of Traffic
	 * Classes supported by the hardware/firmware so we hard code it here
	 * for now.
	 */
	adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connections through firmware work requests
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);

	/* See if FW supports FW_FILTER2 work request */
	if (is_t4(adap->params.chip)) {
		adap->params.filter2_wr_support = 0;
	} else {
		params[0] = FW_PARAM_DEV(FILTER2_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps ||
	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region.  Divide the available filter
		 * region into two parts.  Regular filters get 1/3rd and
		 * server filters get 2/3rd of the region.  This is only
		 * enabled if the workaround path is enabled.
		 * 1. For regular filters.
		 * 2. Server filters: these are special filters which are
		 *    used to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					 DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
						adap->tids.ftid_base;
		}
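		/* Worked example: with nftids == 300, the split above gives
		 * sftid_base == ftid_base + 100, nsftids == 200 and
		 * nftids == 100, i.e. a 1/3 : 2/3 division of the region.
		 */
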
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
			ret = init_hash_filter(adap);
			if (ret < 0)
				goto bye;
		} else {
			adap->params.offload = 1;
			adap->num_ofld_uld += 1;
		}
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SRQ_START);
		params[1] = FW_PARAM_PFVF(SRQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (!ret) {
			adap->vres.srq.start = val[0];
			adap->vres.srq.size = val[1] - val[0] + 1;
		}
		if (adap->vres.srq.size) {
			adap->srq = t4_init_srq(adap->vres.srq.size);
			if (!adap->srq)
				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
		}

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);

		/* Enable write_with_immediate if FW supports it */
		params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);

		/* Enable write_cmpl if FW supports it */
		params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
		/* LIO target and cxgb4i initiator */
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.cryptocaps) {
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
			params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0) {
				if (ret != -EINVAL)
					goto bye;
			} else {
				adap->vres.ncrypto_fc = val[0];
			}
			adap->num_ofld_uld += 1;
		}
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_TLS_INLINE) {
			params[0] = FW_PARAM_PFVF(TLS_START);
			params[1] = FW_PARAM_PFVF(TLS_END);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0)
				goto bye;
			adap->vres.key.start = val[0];
			adap->vres.key.size = val[1] - val[0] + 1;
			adap->num_uld += 1;
		}
		adap->params.crypto = ntohs(caps_cmd.cryptocaps);
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MTU would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MTU of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
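	/* Sanity-checking the 1488 tweak above: payload = MTU - headers,
	 * so 1500 - 40 == 1460 (1460 % 8 == 4), while 1488 - 40 == 1448
	 * and 1500 - 52 == 1448 (1448 % 8 == 0).
	 */
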
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap, true);
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO,
	 * the FW does not operate within its spec or something catastrophic
	 * happened to HW/FW; stop issuing commands.
	 */
bye:
	adap_free_hma_mem(adap);
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (dev) {
			netif_device_detach(dev);
			netif_carrier_off(dev);
		}
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
				"device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (dev) {
			if (netif_running(dev)) {
				link_start(dev);
				cxgb_set_rxmode(dev);
			}
			netif_device_attach(dev);
		}
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};

/* Return true if the Link Configuration supports "High Speeds" (those greater
 * than 1Gb/s).
 */
static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
	high_speeds = speeds &
		      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

	return high_speeds != 0;
}
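
/* For example, a port whose pcaps advertise 1G and 25G still has the 25G
 * bit set in high_speeds after masking off 100M/1G, so this returns true;
 * a 100M/1G-only port returns false.
 */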

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i = 0, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif

	/* Reduce memory usage in kdump environment, disable all offload.
	 */
	if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_kdump_kernel() ? 1 : 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	if (is_kdump_kernel())
		q10g = 1;

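	/* Illustration (assuming MAX_ETH_QSETS == 32, as defined in
	 * cxgb4.h): with 4 ports and port 0 high-speed (n10g == 1),
	 * q10g == (32 - 3) / 1 == 29 before being capped by
	 * netif_get_num_default_rss_queues().
	 */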
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_uld(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else {
			s->ofldqsets = adap->params.nports;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	if (!is_t4(adap->params.chip))
		s->ptptxq.q.size = 8;

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

static int get_msix_info(struct adapter *adap)
{
	struct uld_msix_info *msix_info;
	unsigned int max_ingq = 0;

	if (is_offload(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
	if (is_pci_uld(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_uld;

	if (!max_ingq)
		goto out;

	msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
	if (!msix_info)
		return -ENOMEM;

	adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
						 sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap_ulds.msix_bmap) {
		kfree(msix_info);
		return -ENOMEM;
	}
	spin_lock_init(&adap->msix_bmap_ulds.lock);
	adap->msix_info_ulds = msix_info;
out:
	return 0;
}

static void free_msix_info(struct adapter *adap)
{
	if (!(adap->num_uld && adap->num_ofld_uld))
		return;

	kfree(adap->msix_info_ulds);
	kfree(adap->msix_bmap_ulds.msix_bmap);
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0, uld_need = 0;
	int i, j, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;
	int max_ingq = MAX_INGQ;

	if (is_pci_uld(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
	if (is_offload(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
	entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* map for msix */
	if (get_msix_info(adap)) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for (i = 0; i < max_ingq + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += adap->num_ofld_uld * s->ofldqsets;
		ofld_need = adap->num_ofld_uld * nchan;
	}
	if (is_pci_uld(adap)) {
		want += adap->num_uld * s->ofldqsets;
		uld_need = adap->num_uld * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
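
	/* Rough illustration (assumed numbers, not driver constants): a
	 * 2-port non-DCB NIC with 16 Ethernet queue sets and no ULDs wants
	 * 16 + EXTRA_VECS == 18 vectors and needs at least 2 + 2 == 4;
	 * pci_enable_msix_range() below may grant anything in [need, want].
	 */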
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need - uld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_uld(adap)) {
		if (allocated < want)
			s->nqs_per_uld = nchan;
		else
			s->nqs_per_uld = s->ofldqsets;
	}

	for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
		adap->msix_info[i].vec = entries[i].vector;
	if (is_uld(adap)) {
		for (j = 0; i < allocated; ++i, j++) {
			adap->msix_info_ulds[j].vec = entries[i].vector;
			adap->msix_info_ulds[j].idx = i;
		}
		adap->msix_bmap_ulds.mapsize = j;
	}
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d per uld %d\n",
		 allocated, s->max_ethqsets, s->nqs_per_uld);

	kfree(entries);
	return 0;
}

#undef EXTRA_VECS

static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}

static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
					enum pci_bus_speed *speed,
					enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return err1 ? err1 : err2 ? err2 : -EINVAL;
	return 0;
}

static void cxgb4_check_pcie_caps(struct adapter *adap)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
		 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
	if (speed < speed_cap || width < width_cap)
		dev_info(adap->pdev_dev,
			 "A slot with more lanes and/or higher speed is "
			 "suggested for optimal performance.\n");
}

/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
	/* Hardware/Firmware/etc. Version/Revision IDs */
	t4_dump_version_info(adapter);

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
		  (adapter->flags & USING_MSI) ? "MSI" : ""),
		 is_offload(adapter) ? "Offload" : "non-Offload");
}

static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
		bufp += sprintf(bufp, "100M/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
		bufp += sprintf(bufp, "1G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
		bufp += sprintf(bufp, "25G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
		bufp += sprintf(bufp, "50G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
		bufp += sprintf(bufp, "100G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
		bufp += sprintf(bufp, "200G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
		bufp += sprintf(bufp, "400G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
}

06546391
DM
5173/*
5174 * Free the following resources:
5175 * - memory used for tables
5176 * - MSI/MSI-X
5177 * - net devices
5178 * - resources FW is holding for us
5179 */
5180static void free_some_resources(struct adapter *adapter)
5181{
5182 unsigned int i;
5183
3bdb376e 5184 kvfree(adapter->smt);
752ade68 5185 kvfree(adapter->l2t);
c68644ef 5186 kvfree(adapter->srq);
b72a32da 5187 t4_cleanup_sched(adapter);
752ade68 5188 kvfree(adapter->tids.tid_tab);
e0f911c8 5189 cxgb4_cleanup_tc_flower(adapter);
d8931847 5190 cxgb4_cleanup_tc_u32(adapter);
4b8e27a8
HS
5191 kfree(adapter->sge.egr_map);
5192 kfree(adapter->sge.ingr_map);
5193 kfree(adapter->sge.starving_fl);
5194 kfree(adapter->sge.txq_maperr);
5b377d11
HS
5195#ifdef CONFIG_DEBUG_FS
5196 kfree(adapter->sge.blocked_fl);
5197#endif
06546391
DM
5198 disable_msi(adapter);
5199
5200 for_each_port(adapter, i)
671b0060 5201 if (adapter->port[i]) {
4f3a0fcf
HS
5202 struct port_info *pi = adap2pinfo(adapter, i);
5203
5204 if (pi->viid != 0)
5205 t4_free_vi(adapter, adapter->mbox, adapter->pf,
5206 0, pi->viid);
671b0060 5207 kfree(adap2pinfo(adapter, i)->rss);
06546391 5208 free_netdev(adapter->port[i]);
671b0060 5209 }
06546391 5210 if (adapter->flags & FW_OK)
b2612722 5211 t4_fw_bye(adapter, adapter->pf);
06546391
DM
5212}
5213
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);

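	/* The chip generation is encoded in the top nibble of the PCI
	 * device ID: 0x4xxx for T4, 0x5xxx for T5, 0x6xxx for T6.
	 */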
	switch (device_id >> 12) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
	}
	return -EINVAL;
}

#ifdef CONFIG_PCI_IOV
static void cxgb4_mgmt_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NONE;
	dev->mtu = 0;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
}

static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

	pcie_fw = readl(adap->regs + PCIE_FW_A);
	/* Check if cxgb4 is the MASTER and fw is initialized */
	if (num_vfs &&
	    (!(pcie_fw & PCIE_FW_INIT_F) ||
	     !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
	     PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {
		dev_warn(&pdev->dev,
			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
		return -EOPNOTSUPP;
	}

	/* If any VF is already assigned to a guest OS, the SR-IOV
	 * configuration cannot be modified.
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		return current_vfs;
	}
	/* Note that the upper-level code ensures that we're never called with
	 * a non-zero "num_vfs" when we already have VFs instantiated. But
	 * it never hurts to code defensively.
	 */
	if (num_vfs != 0 && current_vfs != 0)
		return -EBUSY;

	/* Nothing to do for no change. */
	if (num_vfs == current_vfs)
		return num_vfs;

	/* Disable SRIOV when zero is passed. */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		/* free VF Management Interface */
		unregister_netdev(adap->port[0]);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;

		/* free VF resources */
		adap->num_vfs = 0;
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		return 0;
	}

	if (!current_vfs) {
		struct fw_pfvf_cmd port_cmd, port_rpl;
		struct net_device *netdev;
		unsigned int pmask, port;
		struct pci_dev *pbridge;
		struct port_info *pi;
		char name[IFNAMSIZ];
		u32 devcap2;
		u16 flags;
		int pos;

		/* If we want to instantiate Virtual Functions, then our
		 * parent bridge's PCIe needs to support Alternative Routing-ID
		 * Interpretation (ARI) because our VFs will show up at
		 * function offset 8 and above.
		 */
		pbridge = pdev->bus->self;
		pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
		pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
		pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);

		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
			/* Our parent bridge does not support ARI, so issue a
			 * warning and skip instantiating the VFs. They
			 * won't be reachable.
			 */
			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
				 PCI_FUNC(pbridge->devfn));
			return -ENOTSUPP;
		}
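
		/* Ask the firmware which ports this PF may access; the
		 * lowest-numbered one will back the management interface.
		 */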
		memset(&port_cmd, 0, sizeof(port_cmd));
		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
						 FW_CMD_REQUEST_F |
						 FW_CMD_READ_F |
						 FW_PFVF_CMD_PFN_V(adap->pf) |
						 FW_PFVF_CMD_VFN_V(0));
		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
				 &port_rpl);
		if (err)
			return err;
		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
		port = ffs(pmask) - 1;
		/* Allocate VF Management Interface. */
		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
			 adap->pf);
		netdev = alloc_netdev(sizeof(struct port_info),
				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
		if (!netdev)
			return -ENOMEM;

		pi = netdev_priv(netdev);
		pi->adapter = adap;
		pi->lport = port;
		pi->tx_chan = port;
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adap->port[0] = netdev;
		pi->port_id = 0;

		err = register_netdev(adap->port[0]);
		if (err) {
			pr_info("Unable to register VF mgmt netdev %s\n", name);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return err;
		}
		/* Allocate and set up VF Information. */
		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
				       sizeof(struct vf_info), GFP_KERNEL);
		if (!adap->vfinfo) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return -ENOMEM;
		}
		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
	}
	/* Instantiate the requested number of VFs. */
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		pr_info("Unable to instantiate %d VFs\n", num_vfs);
		if (!current_vfs) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			kfree(adap->vfinfo);
			adap->vfinfo = NULL;
		}
		return err;
	}

	adap->num_vfs = num_vfs;
	return num_vfs;
}
#endif /* CONFIG_PCI_IOV */

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	struct net_device *netdev;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;
	static int adap_idx = 1;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->regs = regs;
	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_free_adapter;

	/* We control everything through one PF */
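	/* (The SOURCEPF field moved within PL_WHOAMI on T6, hence the
	 * chip-version check below.)
	 */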
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->params.chip = chip;
	adapter->adap_idx = adap_idx;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);
	pci_set_drvdata(pdev, adapter);

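	/* Only the PF named in the PCI ID table entry runs the full probe;
	 * any other PF just saves its PCI state for later SR-IOV use and
	 * bails out here.
	 */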
	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		return 0;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_free_adapter;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_free_adapter;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adap_idx++;
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers. (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free Lists Buffers will be
	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
	 * all preceding PCIe Transaction Layer Packets will be processed
	 * first.) But some Root Complexes have various issues with Upstream
	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
	 * PCIe devices under such Root Complexes get the Relaxed Ordering
	 * bit cleared in their configuration space, so we check our PCIe
	 * configuration space to see if it's flagged with advice against
	 * using Relaxed Ordering.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
		adapter->flags |= ROOT_NO_RELAXED_ORDERING;

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B long. Write coalescing is enabled
		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
		 * for the queue is less than the number of segments that can
		 * be accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
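		/* BAR2 exposes the doorbell/GTS region on T5 and later;
		 * map it write-combined so doorbell writes may coalesce.
		 */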
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;

		if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;

		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;		/* accommodate SACK */
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	cxgb4_init_ethtool_dump(adapter);

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
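			/* vpd.na holds the MAC address as 12 ASCII hex
			 * digits; fold each pair into one address byte.
			 */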
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

	/* Configure queues and allocate tables now; they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");
	}

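	/* Cache the base of the hashed-TID region for later filter TID
	 * translation; on T5 and earlier the register value is scaled
	 * by 4.
	 */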
	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
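	/* (The "msi" module parameter gates this: >1 tries MSI-X first,
	 * >0 falls back to MSI, otherwise we stay on INTx line interrupts.)
	 */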
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d", err);
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter)) {
		mutex_lock(&uld_mutex);
		list_add_tail(&adapter->list_node, &adapter_list);
		mutex_unlock(&uld_mutex);
	}

	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	print_adapter_info(adapter);
	return 0;

 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= SHUTTING_DOWN;

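	/* Only PF4 went through the full probe (see init_one()); other
	 * PFs exist solely to carry SR-IOV state, so they take the short
	 * teardown path below.
	 */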
	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		clear_all_filters(adapter);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	pci_disable_pcie_error_reporting(pdev);
	if ((adapter->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adapter->flags &= ~DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
	synchronize_rcu();
	kfree(adapter);
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery. This is essentially a stripped-down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shut down
 * any further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above (see extended comment), we only want
	 * to do cleanup on PCI Devices which went all the way through
	 * init_one() ...
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		disable_interrupts(adapter);
		disable_msi(adapter);

		t4_sge_stop(adapter);
		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional; just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);