/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
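
/* Editorial note: these are the standard NETIF_MSG_* flags that seed each
 * port's msg_enable bitmap (the value ethtool reports as msglvl); the
 * default can be overridden with the dflt_msg_enable module parameter below.
 */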

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
	{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
		 " parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 *     msi = 2: choose from among all three options
 *     msi = 1: only consider MSI and INTx interrupts
 *     msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
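
/* Usage note (editorial, not in the original source): loading with e.g.
 * "modprobe cxgb4 msi=1" keeps the driver off MSI-X, and because the
 * parameter is created with mode 0644 its current value can be read back
 * from /sys/module/cxgb4/parameters/msi.
 */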

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds, deprecated parameter");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters, "
		 "deprecated parameter");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
		 "deprecated parameter");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
		 "deprecated parameter");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			cxgb4_dcb_state_init(dev);
			dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
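	/* Addresses are staged in batches of up to 7 -- which, as far as we
	 * can tell, is the per-command limit of t4_alloc_mac_filt() -- and
	 * any overflow beyond the exact-match filters lands in the
	 * uhash/mhash hash filters.
	 */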
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int msi_index = 2;
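	/* Vectors 0 and 1 are reserved for the non-data interrupt and the
	 * firmware event queue (see name_msix_vecs()), so per-queue vectors
	 * start at index 2.
	 */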

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
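			/* With busy polling a user context may still own the
			 * queue; spin until we can take the poll lock and be
			 * sure no busy-poll user is left inside the handler.
			 */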
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

971
b8ff05a9
DM
972/*
973 * Enable NAPI scheduling and interrupt generation for all Rx queues.
974 */
975static void enable_rx(struct adapter *adap)
976{
977 int i;
978
4b8e27a8 979 for (i = 0; i < adap->sge.ingr_sz; i++) {
b8ff05a9
DM
980 struct sge_rspq *q = adap->sge.ingr_map[i];
981
982 if (!q)
983 continue;
3a336cb1
HS
984 if (q->handler) {
985 cxgb_busy_poll_init_lock(q);
b8ff05a9 986 napi_enable(&q->napi);
3a336cb1 987 }
b8ff05a9 988 /* 0-increment GTS to start the timer and enable interrupts */
f612b815
HS
989 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
990 SEINTARM_V(q->intr_params) |
991 INGRESSQID_V(q->cntxt_id));
b8ff05a9
DM
992 }
993}
994
static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler, 0);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, -1);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
	 * don't forget to update the following which need to be
	 * synchronized to any changes here.
	 *
	 * 1. The calculations of MAX_INGQ in cxgb4.h.
	 *
	 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
	 *    to accommodate any new/deleted Ingress Queues
	 *    which need MSI-X Vectors.
	 *
	 * 3. Update sge_qinfo_show() to include information on the
	 *    new/deleted queues.
	 */
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler, -1);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler,
					       t4_get_mps_bg_map(adap,
								 pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
					    adap->port[i / j],
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
	if (err) \
		goto freeout; \
	if (msi_idx > 0) \
		msi_idx += nq; \
} while (0)

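	/* When MSI-X is in use each helper invocation consumes one vector per
	 * queue, which is why the running msi_idx is advanced by nq after
	 * each batch.
	 */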
	ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);

#undef ALLOC_OFLD_RXQS

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
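
/* A minimal usage sketch (illustrative, not part of the original file):
 * allocations from t4_alloc_mem() must be paired with t4_free_mem(), which
 * uses is_vmalloc_addr() to pick the matching release path:
 *
 *	u16 *rss = t4_alloc_mem(pi->rss_size * sizeof(u16));
 *	if (!rss)
 *		return -ENOMEM;
 *	...
 *	t4_free_mem(rss);
 */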

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL) {
			kfree_skb(skb);
			return -EAGAIN;
		}
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
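
/* Both helpers perform a nearest-value search over the configured SGE arrays.
 * For example (values illustrative, not necessarily the driver defaults):
 * with timer_val = { 5, 10, 20, 50, 100 }, closest_timer(s, 30) returns the
 * index of 20, the closest configured hold-off time to the 30 us request.
 */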

/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 4;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	if (family == PF_INET)
		t->stids_in_use--;
	else
		t->stids_in_use -= 4;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
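		/* Recover the Tx channel that cxgb4_queue_tid_release()
		 * stashed in the low 2 bits of the (at least 4-byte aligned)
		 * list pointer, then restore the untagged pointer.
		 */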
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

b8ff05a9
DM
1701 t->tid_tab = t4_alloc_mem(size);
1702 if (!t->tid_tab)
1703 return -ENOMEM;
1704
1705 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1706 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 1707 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
f2b7e78d 1708 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
b8ff05a9
DM
1709 spin_lock_init(&t->stid_lock);
1710 spin_lock_init(&t->atid_lock);
1711
1712 t->stids_in_use = 0;
1713 t->afree = NULL;
1714 t->atids_in_use = 0;
1715 atomic_set(&t->tids_in_use, 0);
1716
1717 /* Set up the free list for atid_tab and clear the stid bitmap. */
1718 if (natids) {
1719 while (--natids)
1720 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1721 t->afree = t->atid_tab;
1722 }
dca4faeb 1723 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
b6f8eaec
KS
1724 /* Reserve stid 0 for T4/T5 adapters */
1725 if (!t->stid_base &&
3ccc6cf7 1726 (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
b6f8eaec
KS
1727 __set_bit(0, t->stid_bmap);
1728
b8ff05a9
DM
1729 return 0;
1730}
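
/* Editorial note (illustrative, hypothetical sizes): tid_init() makes one
 * t4_alloc_mem() allocation and carves it into back-to-back tables. With
 * ntids=1024, natids=128, nstids=64, nsftids=0 and nftids=96 the block is
 * laid out as:
 *
 *   tid_tab    1024 * sizeof(void *)
 *   atid_tab    128 * sizeof(union aopen_entry)
 *   stid_tab     64 * sizeof(struct serv_entry)
 *   stid_bmap  BITS_TO_LONGS(64) * sizeof(long)
 *   ftid_tab     96 * sizeof(struct filter_entry)
 */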
1731
1732/**
1733 * cxgb4_create_server - create an IP server
1734 * @dev: the device
1735 * @stid: the server TID
1736 * @sip: local IP address to bind server to
1737 * @sport: the server's TCP port
 * @vlan: the VLAN header information
1738 * @queue: queue to direct messages from this server to
1739 *
1740 * Create an IP server for the given port and address.
1741 * Returns <0 on error and one of the %NET_XMIT_* values on success.
1742 */
1743int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
793dad94
VP
1744 __be32 sip, __be16 sport, __be16 vlan,
1745 unsigned int queue)
b8ff05a9
DM
1746{
1747 unsigned int chan;
1748 struct sk_buff *skb;
1749 struct adapter *adap;
1750 struct cpl_pass_open_req *req;
80f40c1f 1751 int ret;
b8ff05a9
DM
1752
1753 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1754 if (!skb)
1755 return -ENOMEM;
1756
1757 adap = netdev2adap(dev);
1758 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
1759 INIT_TP_WR(req, 0);
1760 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1761 req->local_port = sport;
1762 req->peer_port = htons(0);
1763 req->local_ip = sip;
1764 req->peer_ip = htonl(0);
e46dab4d 1765 chan = rxq_to_chan(&adap->sge, queue);
d7990b0c 1766 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
6c53e938
HS
1767 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1768 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
80f40c1f
VP
1769 ret = t4_mgmt_tx(adap, skb);
1770 return net_xmit_eval(ret);
b8ff05a9
DM
1771}
1772EXPORT_SYMBOL(cxgb4_create_server);
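
/* Usage sketch (editorial, illustrative only; the "example_" names and the
 * port number are hypothetical): a ULD holding a server TID from
 * cxgb4_alloc_stid() could bring up an IPv4 listener like this, with
 * cxgb4_create_server6() used analogously for IPv6:
 */
#if 0
static int example_listen(struct net_device *dev, unsigned int stid,
			  unsigned int rxq_id)
{
	int ret;

	/* listen on 0.0.0.0:8000, steering CPLs to ingress queue rxq_id */
	ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
				  htons(8000), 0, rxq_id);
	return ret < 0 ? ret : 0;	/* >= 0 is a NET_XMIT_* value */
}
#endif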
1773
80f40c1f
VP
1774/**
 * cxgb4_create_server6 - create an IPv6 server
1775 * @dev: the device
1776 * @stid: the server TID
1777 * @sip: local IPv6 address to bind server to
1778 * @sport: the server's TCP port
1779 * @queue: queue to direct messages from this server to
1780 *
1781 * Create an IPv6 server for the given port and address.
1782 * Returns <0 on error and one of the %NET_XMIT_* values on success.
1783 */
1784int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1785 const struct in6_addr *sip, __be16 sport,
1786 unsigned int queue)
1787{
1788 unsigned int chan;
1789 struct sk_buff *skb;
1790 struct adapter *adap;
1791 struct cpl_pass_open_req6 *req;
1792 int ret;
1793
1794 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1795 if (!skb)
1796 return -ENOMEM;
1797
1798 adap = netdev2adap(dev);
1799 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
1800 INIT_TP_WR(req, 0);
1801 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1802 req->local_port = sport;
1803 req->peer_port = htons(0);
1804 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1805 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1806 req->peer_ip_hi = cpu_to_be64(0);
1807 req->peer_ip_lo = cpu_to_be64(0);
1808 chan = rxq_to_chan(&adap->sge, queue);
d7990b0c 1809 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
6c53e938
HS
1810 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1811 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
80f40c1f
VP
1812 ret = t4_mgmt_tx(adap, skb);
1813 return net_xmit_eval(ret);
1814}
1815EXPORT_SYMBOL(cxgb4_create_server6);
1816
1817int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1818 unsigned int queue, bool ipv6)
1819{
1820 struct sk_buff *skb;
1821 struct adapter *adap;
1822 struct cpl_close_listsvr_req *req;
1823 int ret;
1824
1825 adap = netdev2adap(dev);
1826
1827 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1828 if (!skb)
1829 return -ENOMEM;
1830
1831 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
1832 INIT_TP_WR(req, 0);
1833 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
bdc590b9
HS
1834 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
1835 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
80f40c1f
VP
1836 ret = t4_mgmt_tx(adap, skb);
1837 return net_xmit_eval(ret);
1838}
1839EXPORT_SYMBOL(cxgb4_remove_server);
1840
b8ff05a9
DM
1841/**
1842 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
1843 * @mtus: the HW MTU table
1844 * @mtu: the target MTU
1845 * @idx: index of selected entry in the MTU table
1846 *
1847 * Returns the index and the value in the HW MTU table that is closest to
1848 * but does not exceed @mtu, unless @mtu is smaller than any value in the
1849 * table, in which case that smallest available value is selected.
1850 */
1851unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
1852 unsigned int *idx)
1853{
1854 unsigned int i = 0;
1855
1856 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
1857 ++i;
1858 if (idx)
1859 *idx = i;
1860 return mtus[i];
1861}
1862EXPORT_SYMBOL(cxgb4_best_mtu);
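
/* Worked example (editorial, hypothetical table values): with mtus[]
 * starting {1500, 2002, 4400, 9000, ...}:
 *
 *   cxgb4_best_mtu(mtus, 3000, &idx)  ->  2002, idx == 1
 *   cxgb4_best_mtu(mtus, 1400, &idx)  ->  1500, idx == 0
 *
 * The second call shows the "smaller than any entry" case, where the
 * smallest table value is returned even though it exceeds the target.
 */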
1863
92e7ae71
HS
1864/**
1865 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
1866 * @mtus: the HW MTU table
1867 * @header_size: Header Size
1868 * @data_size_max: maximum Data Segment Size
1869 * @data_size_align: desired Data Segment Size Alignment (2^N)
1870 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
1871 *
1872 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
1873 * MTU Table based solely on a Maximum MTU parameter, we break that
1874 * parameter up into a Header Size and Maximum Data Segment Size, and
1875 * provide a desired Data Segment Size Alignment. If we find an MTU in
1876 * the Hardware MTU Table which will result in a Data Segment Size with
1877 * the requested alignment _and_ that MTU isn't "too far" from the
1878 * closest MTU, then we'll return that rather than the closest MTU.
1879 */
1880unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
1881 unsigned short header_size,
1882 unsigned short data_size_max,
1883 unsigned short data_size_align,
1884 unsigned int *mtu_idxp)
1885{
1886 unsigned short max_mtu = header_size + data_size_max;
1887 unsigned short data_size_align_mask = data_size_align - 1;
1888 int mtu_idx, aligned_mtu_idx;
1889
1890 /* Scan the MTU Table till we find an MTU which is larger than our
1891 * Maximum MTU or we reach the end of the table. Along the way,
1892 * record the last MTU found, if any, which will result in a Data
1893 * Segment Length matching the requested alignment.
1894 */
1895 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
1896 unsigned short data_size = mtus[mtu_idx] - header_size;
1897
1898 /* If this MTU minus the Header Size would result in a
1899 * Data Segment Size of the desired alignment, remember it.
1900 */
1901 if ((data_size & data_size_align_mask) == 0)
1902 aligned_mtu_idx = mtu_idx;
1903
1904 /* If we're not at the end of the Hardware MTU Table and the
1905 * next element is larger than our Maximum MTU, drop out of
1906 * the loop.
1907 */
1908 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
1909 break;
1910 }
1911
1912 /* If we fell out of the loop because we ran to the end of the table,
1913 * then we just have to use the last [largest] entry.
1914 */
1915 if (mtu_idx == NMTUS)
1916 mtu_idx--;
1917
1918 /* If we found an MTU which resulted in the requested Data Segment
1919 * Length alignment and that's "not far" from the largest MTU which is
1920 * less than or equal to the maximum MTU, then use that.
1921 */
1922 if (aligned_mtu_idx >= 0 &&
1923 mtu_idx - aligned_mtu_idx <= 1)
1924 mtu_idx = aligned_mtu_idx;
1925
1926 /* If the caller has passed in an MTU Index pointer, pass the
1927 * MTU Index back. Return the MTU value.
1928 */
1929 if (mtu_idxp)
1930 *mtu_idxp = mtu_idx;
1931 return mtus[mtu_idx];
1932}
1933EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
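
/* Worked example (editorial, hypothetical table {576, 1500, 1588, 1600,
 * 9000}): with header_size=52, data_size_max=1560 (so max_mtu=1612) and
 * data_size_align=512, the plain scan stops at 1600, but 1588 yields a
 * Data Segment Size of 1536 == 3*512 and is only one entry away, so 1588
 * is returned with *mtu_idxp == 2.
 */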
1934
b8ff05a9
DM
1935/**
1936 * cxgb4_port_chan - get the HW channel of a port
1937 * @dev: the net device for the port
1938 *
1939 * Return the HW Tx channel of the given port.
1940 */
1941unsigned int cxgb4_port_chan(const struct net_device *dev)
1942{
1943 return netdev2pinfo(dev)->tx_chan;
1944}
1945EXPORT_SYMBOL(cxgb4_port_chan);
1946
881806bc
VP
1947unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
1948{
1949 struct adapter *adap = netdev2adap(dev);
2cc301d2 1950 u32 v1, v2, lp_count, hp_count;
881806bc 1951
f061de42
HS
1952 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
1953 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
d14807dd 1954 if (is_t4(adap->params.chip)) {
f061de42
HS
1955 lp_count = LP_COUNT_G(v1);
1956 hp_count = HP_COUNT_G(v1);
2cc301d2 1957 } else {
f061de42
HS
1958 lp_count = LP_COUNT_T5_G(v1);
1959 hp_count = HP_COUNT_T5_G(v2);
2cc301d2
SR
1960 }
1961 return lpfifo ? lp_count : hp_count;
881806bc
VP
1962}
1963EXPORT_SYMBOL(cxgb4_dbfifo_count);
1964
b8ff05a9
DM
1965/**
1966 * cxgb4_port_viid - get the VI id of a port
1967 * @dev: the net device for the port
1968 *
1969 * Return the VI id of the given port.
1970 */
1971unsigned int cxgb4_port_viid(const struct net_device *dev)
1972{
1973 return netdev2pinfo(dev)->viid;
1974}
1975EXPORT_SYMBOL(cxgb4_port_viid);
1976
1977/**
1978 * cxgb4_port_idx - get the index of a port
1979 * @dev: the net device for the port
1980 *
1981 * Return the index of the given port.
1982 */
1983unsigned int cxgb4_port_idx(const struct net_device *dev)
1984{
1985 return netdev2pinfo(dev)->port_id;
1986}
1987EXPORT_SYMBOL(cxgb4_port_idx);
1988
b8ff05a9
DM
1989void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
1990 struct tp_tcp_stats *v6)
1991{
1992 struct adapter *adap = pci_get_drvdata(pdev);
1993
1994 spin_lock(&adap->stats_lock);
1995 t4_tp_get_tcp_stats(adap, v4, v6);
1996 spin_unlock(&adap->stats_lock);
1997}
1998EXPORT_SYMBOL(cxgb4_get_tcp_stats);
1999
2000void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2001 const unsigned int *pgsz_order)
2002{
2003 struct adapter *adap = netdev2adap(dev);
2004
0d804338
HS
2005 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2006 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2007 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2008 HPZ3_V(pgsz_order[3]));
b8ff05a9
DM
2009}
2010EXPORT_SYMBOL(cxgb4_iscsi_init);
2011
3069ee9b
VP
2012int cxgb4_flush_eq_cache(struct net_device *dev)
2013{
2014 struct adapter *adap = netdev2adap(dev);
2015 int ret;
2016
2017 ret = t4_fwaddrspace_write(adap, adap->mbox,
f061de42 2018 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
3069ee9b
VP
2019 return ret;
2020}
2021EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2022
2023static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2024{
f061de42 2025 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
3069ee9b
VP
2026 __be64 indices;
2027 int ret;
2028
fc5ab020
HS
2029 spin_lock(&adap->win0_lock);
2030 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2031 sizeof(indices), (__be32 *)&indices,
2032 T4_MEMORY_READ);
2033 spin_unlock(&adap->win0_lock);
3069ee9b 2034 if (!ret) {
404d9e3f
VP
2035 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2036 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
2037 }
2038 return ret;
2039}
2040
2041int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2042 u16 size)
2043{
2044 struct adapter *adap = netdev2adap(dev);
2045 u16 hw_pidx, hw_cidx;
2046 int ret;
2047
2048 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2049 if (ret)
2050 goto out;
2051
2052 if (pidx != hw_pidx) {
2053 u16 delta;
f612b815 2054 u32 val;
3069ee9b
VP
2055
2056 if (pidx >= hw_pidx)
2057 delta = pidx - hw_pidx;
2058 else
2059 delta = size - hw_pidx + pidx;
f612b815
HS
2060
2061 if (is_t4(adap->params.chip))
2062 val = PIDX_V(delta);
2063 else
2064 val = PIDX_T5_V(delta);
3069ee9b 2065 wmb();
f612b815
HS
2066 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2067 QID_V(qid) | val);
3069ee9b
VP
2068 }
2069out:
2070 return ret;
2071}
2072EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
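
/* Worked example (editorial, illustrative only): the doorbell delta must
 * account for producer-index wrap. With size=512, pidx=10 and hw_pidx=500,
 * the host is 512 - 500 + 10 = 22 descriptors ahead, not -490:
 */
#if 0
static inline u16 example_pidx_delta(u16 pidx, u16 hw_pidx, u16 size)
{
	return pidx >= hw_pidx ? pidx - hw_pidx
			       : size - hw_pidx + pidx;
}
#endif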
2073
031cf476
HS
2074int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2075{
2076 struct adapter *adap;
2077 u32 offset, memtype, memaddr;
6559a7e8 2078 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
031cf476
HS
2079 u32 edc0_end, edc1_end, mc0_end, mc1_end;
2080 int ret;
2081
2082 adap = netdev2adap(dev);
2083
2084 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2085
2086 /* Figure out where the offset lands in the Memory Type/Address scheme.
2087 * This code assumes that the memory is laid out starting at offset 0
2088 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
2089 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
2090 * MC0, and some have both MC0 and MC1.
2091 */
6559a7e8
HS
2092 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2093 edc0_size = EDRAM0_SIZE_G(size) << 20;
2094 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2095 edc1_size = EDRAM1_SIZE_G(size) << 20;
2096 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2097 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
031cf476
HS
2098
2099 edc0_end = edc0_size;
2100 edc1_end = edc0_end + edc1_size;
2101 mc0_end = edc1_end + mc0_size;
2102
2103 if (offset < edc0_end) {
2104 memtype = MEM_EDC0;
2105 memaddr = offset;
2106 } else if (offset < edc1_end) {
2107 memtype = MEM_EDC1;
2108 memaddr = offset - edc0_end;
2109 } else {
2110 if (offset < mc0_end) {
2111 memtype = MEM_MC0;
2112 memaddr = offset - edc1_end;
3ccc6cf7 2113 } else if (is_t5(adap->params.chip)) {
6559a7e8
HS
2114 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2115 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
031cf476
HS
2116 mc1_end = mc0_end + mc1_size;
2117 if (offset < mc1_end) {
2118 memtype = MEM_MC1;
2119 memaddr = offset - mc0_end;
2120 } else {
2121 /* offset beyond the end of any memory */
2122 goto err;
2123 }
3ccc6cf7
HS
2124 } else {
2125 /* T4/T6 have only a single memory channel */
2126 goto err;
031cf476
HS
2127 }
2128 }
2129
2130 spin_lock(&adap->win0_lock);
2131 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2132 spin_unlock(&adap->win0_lock);
2133 return ret;
2134
2135err:
2136 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2137 stag, offset);
2138 return -EINVAL;
2139}
2140EXPORT_SYMBOL(cxgb4_read_tpte);
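
/* Worked example (editorial, hypothetical sizes): with 1 MB of EDC0, 1 MB
 * of EDC1 and 4 MB of MC0, the flat offset computed from the stag maps as:
 *
 *   0x000000..0x0FFFFF -> MEM_EDC0, memaddr = offset
 *   0x100000..0x1FFFFF -> MEM_EDC1, memaddr = offset - 0x100000
 *   0x200000..0x5FFFFF -> MEM_MC0,  memaddr = offset - 0x200000
 *
 * e.g. offset 0x180000 reads from EDC1 at internal address 0x80000.
 */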
2141
7730b4c7
HS
2142u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2143{
2144 u32 hi, lo;
2145 struct adapter *adap;
2146
2147 adap = netdev2adap(dev);
f612b815
HS
2148 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2149 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
7730b4c7
HS
2150
2151 return ((u64)hi << 32) | (u64)lo;
2152}
2153EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2154
df64e4d3
HS
2155int cxgb4_bar2_sge_qregs(struct net_device *dev,
2156 unsigned int qid,
2157 enum cxgb4_bar2_qtype qtype,
2158 u64 *pbar2_qoffset,
2159 unsigned int *pbar2_qid)
2160{
b2612722 2161 return t4_bar2_sge_qregs(netdev2adap(dev),
df64e4d3
HS
2162 qid,
2163 (qtype == CXGB4_BAR2_QTYPE_EGRESS
2164 ? T4_BAR2_QTYPE_EGRESS
2165 : T4_BAR2_QTYPE_INGRESS),
2166 pbar2_qoffset,
2167 pbar2_qid);
2168}
2169EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2170
b8ff05a9
DM
2171static struct pci_driver cxgb4_driver;
2172
2173static void check_neigh_update(struct neighbour *neigh)
2174{
2175 const struct device *parent;
2176 const struct net_device *netdev = neigh->dev;
2177
2178 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2179 netdev = vlan_dev_real_dev(netdev);
2180 parent = netdev->dev.parent;
2181 if (parent && parent->driver == &cxgb4_driver.driver)
2182 t4_l2t_update(dev_get_drvdata(parent), neigh);
2183}
2184
2185static int netevent_cb(struct notifier_block *nb, unsigned long event,
2186 void *data)
2187{
2188 switch (event) {
2189 case NETEVENT_NEIGH_UPDATE:
2190 check_neigh_update(data);
2191 break;
b8ff05a9
DM
2192 case NETEVENT_REDIRECT:
2193 default:
2194 break;
2195 }
2196 return 0;
2197}
2198
2199static bool netevent_registered;
2200static struct notifier_block cxgb4_netevent_nb = {
2201 .notifier_call = netevent_cb
2202};
2203
3069ee9b
VP
2204static void drain_db_fifo(struct adapter *adap, int usecs)
2205{
2cc301d2 2206 u32 v1, v2, lp_count, hp_count;
3069ee9b
VP
2207
2208 do {
f061de42
HS
2209 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2210 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
d14807dd 2211 if (is_t4(adap->params.chip)) {
f061de42
HS
2212 lp_count = LP_COUNT_G(v1);
2213 hp_count = HP_COUNT_G(v1);
2cc301d2 2214 } else {
f061de42
HS
2215 lp_count = LP_COUNT_T5_G(v1);
2216 hp_count = HP_COUNT_T5_G(v2);
2cc301d2
SR
2217 }
2218
2219 if (lp_count == 0 && hp_count == 0)
2220 break;
3069ee9b
VP
2221 set_current_state(TASK_UNINTERRUPTIBLE);
2222 schedule_timeout(usecs_to_jiffies(usecs));
3069ee9b
VP
2223 } while (1);
2224}
2225
2226static void disable_txq_db(struct sge_txq *q)
2227{
05eb2389
SW
2228 unsigned long flags;
2229
2230 spin_lock_irqsave(&q->db_lock, flags);
3069ee9b 2231 q->db_disabled = 1;
05eb2389 2232 spin_unlock_irqrestore(&q->db_lock, flags);
3069ee9b
VP
2233}
2234
05eb2389 2235static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3069ee9b
VP
2236{
2237 spin_lock_irq(&q->db_lock);
05eb2389
SW
2238 if (q->db_pidx_inc) {
2239 /* Make sure that all writes to the TX descriptors
2240 * are committed before we tell HW about them.
2241 */
2242 wmb();
f612b815
HS
2243 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2244 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
05eb2389
SW
2245 q->db_pidx_inc = 0;
2246 }
3069ee9b
VP
2247 q->db_disabled = 0;
2248 spin_unlock_irq(&q->db_lock);
2249}
2250
2251static void disable_dbs(struct adapter *adap)
2252{
2253 int i;
2254
2255 for_each_ethrxq(&adap->sge, i)
2256 disable_txq_db(&adap->sge.ethtxq[i].q);
2257 for_each_ofldrxq(&adap->sge, i)
2258 disable_txq_db(&adap->sge.ofldtxq[i].q);
2259 for_each_port(adap, i)
2260 disable_txq_db(&adap->sge.ctrlq[i].q);
2261}
2262
2263static void enable_dbs(struct adapter *adap)
2264{
2265 int i;
2266
2267 for_each_ethrxq(&adap->sge, i)
05eb2389 2268 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3069ee9b 2269 for_each_ofldrxq(&adap->sge, i)
05eb2389 2270 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3069ee9b 2271 for_each_port(adap, i)
05eb2389
SW
2272 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2273}
2274
2275static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2276{
2277 if (adap->uld_handle[CXGB4_ULD_RDMA])
2278 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2279 cmd);
2280}
2281
2282static void process_db_full(struct work_struct *work)
2283{
2284 struct adapter *adap;
2285
2286 adap = container_of(work, struct adapter, db_full_task);
2287
2288 drain_db_fifo(adap, dbfifo_drain_delay);
2289 enable_dbs(adap);
2290 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3ccc6cf7
HS
2291 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2292 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2293 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2294 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2295 else
2296 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2297 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
3069ee9b
VP
2298}
2299
2300static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2301{
2302 u16 hw_pidx, hw_cidx;
2303 int ret;
2304
05eb2389 2305 spin_lock_irq(&q->db_lock);
3069ee9b
VP
2306 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2307 if (ret)
2308 goto out;
2309 if (q->db_pidx != hw_pidx) {
2310 u16 delta;
f612b815 2311 u32 val;
3069ee9b
VP
2312
2313 if (q->db_pidx >= hw_pidx)
2314 delta = q->db_pidx - hw_pidx;
2315 else
2316 delta = q->size - hw_pidx + q->db_pidx;
f612b815
HS
2317
2318 if (is_t4(adap->params.chip))
2319 val = PIDX_V(delta);
2320 else
2321 val = PIDX_T5_V(delta);
3069ee9b 2322 wmb();
f612b815
HS
2323 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2324 QID_V(q->cntxt_id) | val);
3069ee9b
VP
2325 }
2326out:
2327 q->db_disabled = 0;
05eb2389
SW
2328 q->db_pidx_inc = 0;
2329 spin_unlock_irq(&q->db_lock);
3069ee9b
VP
2330 if (ret)
2331 CH_WARN(adap, "DB drop recovery failed.\n");
2332}

2333static void recover_all_queues(struct adapter *adap)
2334{
2335 int i;
2336
2337 for_each_ethrxq(&adap->sge, i)
2338 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2339 for_each_ofldrxq(&adap->sge, i)
2340 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2341 for_each_port(adap, i)
2342 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2343}
2344
881806bc
VP
2345static void process_db_drop(struct work_struct *work)
2346{
2347 struct adapter *adap;
881806bc 2348
3069ee9b 2349 adap = container_of(work, struct adapter, db_drop_task);
881806bc 2350
d14807dd 2351 if (is_t4(adap->params.chip)) {
05eb2389 2352 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 2353 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
05eb2389 2354 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 2355 recover_all_queues(adap);
05eb2389 2356 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 2357 enable_dbs(adap);
05eb2389 2358 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3ccc6cf7 2359 } else if (is_t5(adap->params.chip)) {
2cc301d2
SR
2360 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2361 u16 qid = (dropped_db >> 15) & 0x1ffff;
2362 u16 pidx_inc = dropped_db & 0x1fff;
df64e4d3
HS
2363 u64 bar2_qoffset;
2364 unsigned int bar2_qid;
2365 int ret;
2cc301d2 2366
b2612722 2367 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
df64e4d3
HS
2368 &bar2_qoffset, &bar2_qid);
2369 if (ret)
2370 dev_err(adap->pdev_dev, "doorbell drop recovery: "
2371 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
2372 else
f612b815 2373 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
df64e4d3 2374 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2cc301d2
SR
2375
2376 /* Re-enable BAR2 WC */
2377 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
2378 }
2379
3ccc6cf7
HS
2380 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2381 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
881806bc
VP
2382}
2383
2384void t4_db_full(struct adapter *adap)
2385{
d14807dd 2386 if (is_t4(adap->params.chip)) {
05eb2389
SW
2387 disable_dbs(adap);
2388 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
f612b815
HS
2389 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2390 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
29aaee65 2391 queue_work(adap->workq, &adap->db_full_task);
2cc301d2 2392 }
881806bc
VP
2393}
2394
2395void t4_db_dropped(struct adapter *adap)
2396{
05eb2389
SW
2397 if (is_t4(adap->params.chip)) {
2398 disable_dbs(adap);
2399 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2400 }
29aaee65 2401 queue_work(adap->workq, &adap->db_drop_task);
881806bc
VP
2402}
2403
b8ff05a9
DM
2404static void uld_attach(struct adapter *adap, unsigned int uld)
2405{
2406 void *handle;
2407 struct cxgb4_lld_info lli;
dca4faeb 2408 unsigned short i;
b8ff05a9
DM
2409
2410 lli.pdev = adap->pdev;
b2612722 2411 lli.pf = adap->pf;
b8ff05a9
DM
2412 lli.l2t = adap->l2t;
2413 lli.tids = &adap->tids;
2414 lli.ports = adap->port;
2415 lli.vr = &adap->vres;
2416 lli.mtus = adap->params.mtus;
2417 if (uld == CXGB4_ULD_RDMA) {
2418 lli.rxq_ids = adap->sge.rdma_rxq;
cf38be6d 2419 lli.ciq_ids = adap->sge.rdma_ciq;
b8ff05a9 2420 lli.nrxq = adap->sge.rdmaqs;
cf38be6d 2421 lli.nciq = adap->sge.rdmaciqs;
b8ff05a9
DM
2422 } else if (uld == CXGB4_ULD_ISCSI) {
2423 lli.rxq_ids = adap->sge.ofld_rxq;
2424 lli.nrxq = adap->sge.ofldqsets;
2425 }
2426 lli.ntxq = adap->sge.ofldqsets;
2427 lli.nchan = adap->params.nports;
2428 lli.nports = adap->params.nports;
2429 lli.wr_cred = adap->params.ofldq_wr_cred;
d14807dd 2430 lli.adapter_type = adap->params.chip;
837e4a42 2431 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
7730b4c7 2432 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
df64e4d3
HS
2433 lli.udb_density = 1 << adap->params.sge.eq_qpp;
2434 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
dcf7b6f5 2435 lli.filt_mode = adap->params.tp.vlan_pri_map;
dca4faeb
VP
2436 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
2437 for (i = 0; i < NCHAN; i++)
2438 lli.tx_modq[i] = i;
f612b815
HS
2439 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
2440 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
b8ff05a9 2441 lli.fw_vers = adap->params.fw_vers;
3069ee9b 2442 lli.dbfifo_int_thresh = dbfifo_int_thresh;
04e10e21
HS
2443 lli.sge_ingpadboundary = adap->sge.fl_align;
2444 lli.sge_egrstatuspagesize = adap->sge.stat_len;
dca4faeb
VP
2445 lli.sge_pktshift = adap->sge.pktshift;
2446 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4c2c5763
HS
2447 lli.max_ordird_qp = adap->params.max_ordird_qp;
2448 lli.max_ird_adapter = adap->params.max_ird_adapter;
1ac0f095 2449 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
982b81eb 2450 lli.nodeid = dev_to_node(adap->pdev_dev);
b8ff05a9
DM
2451
2452 handle = ulds[uld].add(&lli);
2453 if (IS_ERR(handle)) {
2454 dev_warn(adap->pdev_dev,
2455 "could not attach to the %s driver, error %ld\n",
2456 uld_str[uld], PTR_ERR(handle));
2457 return;
2458 }
2459
2460 adap->uld_handle[uld] = handle;
2461
2462 if (!netevent_registered) {
2463 register_netevent_notifier(&cxgb4_netevent_nb);
2464 netevent_registered = true;
2465 }
e29f5dbc
DM
2466
2467 if (adap->flags & FULL_INIT_DONE)
2468 ulds[uld].state_change(handle, CXGB4_STATE_UP);
b8ff05a9
DM
2469}
2470
2471static void attach_ulds(struct adapter *adap)
2472{
2473 unsigned int i;
2474
01bcca68
VP
2475 spin_lock(&adap_rcu_lock);
2476 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
2477 spin_unlock(&adap_rcu_lock);
2478
b8ff05a9
DM
2479 mutex_lock(&uld_mutex);
2480 list_add_tail(&adap->list_node, &adapter_list);
2481 for (i = 0; i < CXGB4_ULD_MAX; i++)
2482 if (ulds[i].add)
2483 uld_attach(adap, i);
2484 mutex_unlock(&uld_mutex);
2485}
2486
2487static void detach_ulds(struct adapter *adap)
2488{
2489 unsigned int i;
2490
2491 mutex_lock(&uld_mutex);
2492 list_del(&adap->list_node);
2493 for (i = 0; i < CXGB4_ULD_MAX; i++)
2494 if (adap->uld_handle[i]) {
2495 ulds[i].state_change(adap->uld_handle[i],
2496 CXGB4_STATE_DETACH);
2497 adap->uld_handle[i] = NULL;
2498 }
2499 if (netevent_registered && list_empty(&adapter_list)) {
2500 unregister_netevent_notifier(&cxgb4_netevent_nb);
2501 netevent_registered = false;
2502 }
2503 mutex_unlock(&uld_mutex);
01bcca68
VP
2504
2505 spin_lock(&adap_rcu_lock);
2506 list_del_rcu(&adap->rcu_node);
2507 spin_unlock(&adap_rcu_lock);
b8ff05a9
DM
2508}
2509
2510static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2511{
2512 unsigned int i;
2513
2514 mutex_lock(&uld_mutex);
2515 for (i = 0; i < CXGB4_ULD_MAX; i++)
2516 if (adap->uld_handle[i])
2517 ulds[i].state_change(adap->uld_handle[i], new_state);
2518 mutex_unlock(&uld_mutex);
2519}
2520
2521/**
2522 * cxgb4_register_uld - register an upper-layer driver
2523 * @type: the ULD type
2524 * @p: the ULD methods
2525 *
2526 * Registers an upper-layer driver with this driver and notifies the ULD
2527 * about any presently available devices that support its type. Returns
2528 * %-EBUSY if a ULD of the same type is already registered.
2529 */
2530int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2531{
2532 int ret = 0;
2533 struct adapter *adap;
2534
2535 if (type >= CXGB4_ULD_MAX)
2536 return -EINVAL;
2537 mutex_lock(&uld_mutex);
2538 if (ulds[type].add) {
2539 ret = -EBUSY;
2540 goto out;
2541 }
2542 ulds[type] = *p;
2543 list_for_each_entry(adap, &adapter_list, list_node)
2544 uld_attach(adap, type);
2545out: mutex_unlock(&uld_mutex);
2546 return ret;
2547}
2548EXPORT_SYMBOL(cxgb4_register_uld);
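
/* Usage sketch (editorial, illustrative only; every "example_" name is
 * hypothetical): an upper-layer driver typically registers at module init
 * and is then called back via ->add() for each adapter already probed:
 */
#if 0
static void *example_add(const struct cxgb4_lld_info *lli)
{
	void *handle = kzalloc(sizeof(unsigned long), GFP_KERNEL);

	/* uld_attach() treats an ERR_PTR() return as attach failure */
	return handle ? handle : ERR_PTR(-ENOMEM);
}

static struct cxgb4_uld_info example_uld_info = {
	.add = example_add,
	/* .rx_handler, .state_change, .control as the ULD requires */
};

static int __init example_uld_init(void)
{
	return cxgb4_register_uld(CXGB4_ULD_ISCSI, &example_uld_info);
}
#endif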
2549
2550/**
2551 * cxgb4_unregister_uld - unregister an upper-layer driver
2552 * @type: the ULD type
2553 *
2554 * Unregisters an existing upper-layer driver.
2555 */
2556int cxgb4_unregister_uld(enum cxgb4_uld type)
2557{
2558 struct adapter *adap;
2559
2560 if (type >= CXGB4_ULD_MAX)
2561 return -EINVAL;
2562 mutex_lock(&uld_mutex);
2563 list_for_each_entry(adap, &adapter_list, list_node)
2564 adap->uld_handle[type] = NULL;
2565 ulds[type].add = NULL;
2566 mutex_unlock(&uld_mutex);
2567 return 0;
2568}
2569EXPORT_SYMBOL(cxgb4_unregister_uld);
2570
1bb60376 2571#if IS_ENABLED(CONFIG_IPV6)
b5a02f50
AB
2572static int cxgb4_inet6addr_handler(struct notifier_block *this,
2573 unsigned long event, void *data)
01bcca68 2574{
b5a02f50
AB
2575 struct inet6_ifaddr *ifa = data;
2576 struct net_device *event_dev = ifa->idev->dev;
2577 const struct device *parent = NULL;
2578#if IS_ENABLED(CONFIG_BONDING)
01bcca68 2579 struct adapter *adap;
b5a02f50
AB
2580#endif
2581 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
2582 event_dev = vlan_dev_real_dev(event_dev);
2583#if IS_ENABLED(CONFIG_BONDING)
2584 if (event_dev->flags & IFF_MASTER) {
2585 list_for_each_entry(adap, &adapter_list, list_node) {
2586 switch (event) {
2587 case NETDEV_UP:
2588 cxgb4_clip_get(adap->port[0],
2589 (const u32 *)ifa, 1);
2590 break;
2591 case NETDEV_DOWN:
2592 cxgb4_clip_release(adap->port[0],
2593 (const u32 *)ifa, 1);
2594 break;
2595 default:
2596 break;
2597 }
2598 }
2599 return NOTIFY_OK;
2600 }
2601#endif
01bcca68 2602
b5a02f50
AB
2603 if (event_dev)
2604 parent = event_dev->dev.parent;
01bcca68 2605
b5a02f50 2606 if (parent && parent->driver == &cxgb4_driver.driver) {
01bcca68
VP
2607 switch (event) {
2608 case NETDEV_UP:
b5a02f50 2609 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
01bcca68
VP
2610 break;
2611 case NETDEV_DOWN:
b5a02f50 2612 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
01bcca68
VP
2613 break;
2614 default:
2615 break;
2616 }
2617 }
b5a02f50 2618 return NOTIFY_OK;
01bcca68
VP
2619}
2620
b5a02f50 2621static bool inet6addr_registered;
01bcca68
VP
2622static struct notifier_block cxgb4_inet6addr_notifier = {
2623 .notifier_call = cxgb4_inet6addr_handler
2624};
2625
01bcca68
VP
2626static void update_clip(const struct adapter *adap)
2627{
2628 int i;
2629 struct net_device *dev;
2630 int ret;
2631
2632 rcu_read_lock();
2633
2634 for (i = 0; i < MAX_NPORTS; i++) {
2635 dev = adap->port[i];
2636 ret = 0;
2637
2638 if (dev)
b5a02f50 2639 ret = cxgb4_update_root_dev_clip(dev);
01bcca68
VP
2640
2641 if (ret < 0)
2642 break;
2643 }
2644 rcu_read_unlock();
2645}
1bb60376 2646#endif /* IS_ENABLED(CONFIG_IPV6) */
01bcca68 2647
b8ff05a9
DM
2648/**
2649 * cxgb_up - enable the adapter
2650 * @adap: adapter being enabled
2651 *
2652 * Called when the first port is enabled, this function performs the
2653 * actions necessary to make an adapter operational, such as completing
2654 * the initialization of HW modules, and enabling interrupts.
2655 *
2656 * Must be called with the rtnl lock held.
2657 */
2658static int cxgb_up(struct adapter *adap)
2659{
aaefae9b 2660 int err;
b8ff05a9 2661
aaefae9b
DM
2662 err = setup_sge_queues(adap);
2663 if (err)
2664 goto out;
2665 err = setup_rss(adap);
2666 if (err)
2667 goto freeq;
b8ff05a9
DM
2668
2669 if (adap->flags & USING_MSIX) {
aaefae9b 2670 name_msix_vecs(adap);
b8ff05a9
DM
2671 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2672 adap->msix_info[0].desc, adap);
2673 if (err)
2674 goto irq_err;
2675
2676 err = request_msix_queue_irqs(adap);
2677 if (err) {
2678 free_irq(adap->msix_info[0].vec, adap);
2679 goto irq_err;
2680 }
2681 } else {
2682 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2683 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
b1a3c2b6 2684 adap->port[0]->name, adap);
b8ff05a9
DM
2685 if (err)
2686 goto irq_err;
2687 }
2688 enable_rx(adap);
2689 t4_sge_start(adap);
2690 t4_intr_enable(adap);
aaefae9b 2691 adap->flags |= FULL_INIT_DONE;
b8ff05a9 2692 notify_ulds(adap, CXGB4_STATE_UP);
1bb60376 2693#if IS_ENABLED(CONFIG_IPV6)
01bcca68 2694 update_clip(adap);
1bb60376 2695#endif
b8ff05a9
DM
2696 out:
2697 return err;
2698 irq_err:
2699 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
aaefae9b
DM
2700 freeq:
2701 t4_free_sge_resources(adap);
b8ff05a9
DM
2702 goto out;
2703}
2704
2705static void cxgb_down(struct adapter *adapter)
2706{
b8ff05a9 2707 cancel_work_sync(&adapter->tid_release_task);
881806bc
VP
2708 cancel_work_sync(&adapter->db_full_task);
2709 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 2710 adapter->tid_release_task_busy = false;
204dc3c0 2711 adapter->tid_release_head = NULL;
b8ff05a9 2712
aaefae9b
DM
2713 t4_sge_stop(adapter);
2714 t4_free_sge_resources(adapter);
2715 adapter->flags &= ~FULL_INIT_DONE;
b8ff05a9
DM
2716}
2717
2718/*
2719 * net_device operations
2720 */
2721static int cxgb_open(struct net_device *dev)
2722{
2723 int err;
2724 struct port_info *pi = netdev_priv(dev);
2725 struct adapter *adapter = pi->adapter;
2726
6a3c869a
DM
2727 netif_carrier_off(dev);
2728
aaefae9b
DM
2729 if (!(adapter->flags & FULL_INIT_DONE)) {
2730 err = cxgb_up(adapter);
2731 if (err < 0)
2732 return err;
2733 }
b8ff05a9 2734
f68707b8
DM
2735 err = link_start(dev);
2736 if (!err)
2737 netif_tx_start_all_queues(dev);
2738 return err;
b8ff05a9
DM
2739}
2740
2741static int cxgb_close(struct net_device *dev)
2742{
b8ff05a9
DM
2743 struct port_info *pi = netdev_priv(dev);
2744 struct adapter *adapter = pi->adapter;
2745
2746 netif_tx_stop_all_queues(dev);
2747 netif_carrier_off(dev);
b2612722 2748 return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
b8ff05a9
DM
2749}
2750
f2b7e78d
VP
2751/* Return an error number if the indicated filter isn't writable ...
2752 */
2753static int writable_filter(struct filter_entry *f)
2754{
2755 if (f->locked)
2756 return -EPERM;
2757 if (f->pending)
2758 return -EBUSY;
2759
2760 return 0;
2761}
2762
2763/* Delete the filter at the specified index (if valid). This checks for
2764 * all the common problems with doing this, like the filter being locked,
2765 * currently pending in another operation, etc.
2766 */
2767static int delete_filter(struct adapter *adapter, unsigned int fidx)
2768{
2769 struct filter_entry *f;
2770 int ret;
2771
dca4faeb 2772 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
f2b7e78d
VP
2773 return -EINVAL;
2774
2775 f = &adapter->tids.ftid_tab[fidx];
2776 ret = writable_filter(f);
2777 if (ret)
2778 return ret;
2779 if (f->valid)
2780 return del_filter_wr(adapter, fidx);
2781
2782 return 0;
2783}
2784
dca4faeb 2785int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
793dad94
VP
2786 __be32 sip, __be16 sport, __be16 vlan,
2787 unsigned int queue, unsigned char port, unsigned char mask)
dca4faeb
VP
2788{
2789 int ret;
2790 struct filter_entry *f;
2791 struct adapter *adap;
2792 int i;
2793 u8 *val;
2794
2795 adap = netdev2adap(dev);
2796
1cab775c 2797 /* Adjust stid to correct filter index */
470c60c4 2798 stid -= adap->tids.sftid_base;
1cab775c
VP
2799 stid += adap->tids.nftids;
2800
dca4faeb
VP
2801 /* Check to make sure the filter requested is writable ...
2802 */
2803 f = &adap->tids.ftid_tab[stid];
2804 ret = writable_filter(f);
2805 if (ret)
2806 return ret;
2807
2808 /* Clear out any old resources being used by the filter before
2809 * we start constructing the new filter.
2810 */
2811 if (f->valid)
2812 clear_filter(adap, f);
2813
2814 /* Clear out filter specifications */
2815 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2816 f->fs.val.lport = cpu_to_be16(sport);
2817 f->fs.mask.lport = ~0;
2818 val = (u8 *)&sip;
793dad94 2819 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
dca4faeb
VP
2820 for (i = 0; i < 4; i++) {
2821 f->fs.val.lip[i] = val[i];
2822 f->fs.mask.lip[i] = ~0;
2823 }
0d804338 2824 if (adap->params.tp.vlan_pri_map & PORT_F) {
793dad94
VP
2825 f->fs.val.iport = port;
2826 f->fs.mask.iport = mask;
2827 }
2828 }
dca4faeb 2829
0d804338 2830 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
7c89e555
KS
2831 f->fs.val.proto = IPPROTO_TCP;
2832 f->fs.mask.proto = ~0;
2833 }
2834
dca4faeb
VP
2835 f->fs.dirsteer = 1;
2836 f->fs.iq = queue;
2837 /* Mark filter as locked */
2838 f->locked = 1;
2839 f->fs.rpttid = 1;
2840
2841 ret = set_filter_wr(adap, stid);
2842 if (ret) {
2843 clear_filter(adap, f);
2844 return ret;
2845 }
2846
2847 return 0;
2848}
2849EXPORT_SYMBOL(cxgb4_create_server_filter);
2850
2851int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2852 unsigned int queue, bool ipv6)
2853{
2854 int ret;
2855 struct filter_entry *f;
2856 struct adapter *adap;
2857
2858 adap = netdev2adap(dev);
1cab775c
VP
2859
2860 /* Adjust stid to correct filter index */
470c60c4 2861 stid -= adap->tids.sftid_base;
1cab775c
VP
2862 stid += adap->tids.nftids;
2863
dca4faeb
VP
2864 f = &adap->tids.ftid_tab[stid];
2865 /* Unlock the filter */
2866 f->locked = 0;
2867
2868 ret = delete_filter(adap, stid);
2869 if (ret)
2870 return ret;
2871
2872 return 0;
2873}
2874EXPORT_SYMBOL(cxgb4_remove_server_filter);
2875
f5152c90
DM
2876static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2877 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
2878{
2879 struct port_stats stats;
2880 struct port_info *p = netdev_priv(dev);
2881 struct adapter *adapter = p->adapter;
b8ff05a9 2882
9fe6cb58
GS
2883 /* Block retrieving statistics during EEH error
2884 * recovery. Otherwise, the recovery might fail
2885 * and the PCI device will be removed permanently
2886 */
b8ff05a9 2887 spin_lock(&adapter->stats_lock);
9fe6cb58
GS
2888 if (!netif_device_present(dev)) {
2889 spin_unlock(&adapter->stats_lock);
2890 return ns;
2891 }
b8ff05a9
DM
2892 t4_get_port_stats(adapter, p->tx_chan, &stats);
2893 spin_unlock(&adapter->stats_lock);
2894
2895 ns->tx_bytes = stats.tx_octets;
2896 ns->tx_packets = stats.tx_frames;
2897 ns->rx_bytes = stats.rx_octets;
2898 ns->rx_packets = stats.rx_frames;
2899 ns->multicast = stats.rx_mcast_frames;
2900
2901 /* detailed rx_errors */
2902 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2903 stats.rx_runt;
2904 ns->rx_over_errors = 0;
2905 ns->rx_crc_errors = stats.rx_fcs_err;
2906 ns->rx_frame_errors = stats.rx_symbol_err;
2907 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2908 stats.rx_ovflow2 + stats.rx_ovflow3 +
2909 stats.rx_trunc0 + stats.rx_trunc1 +
2910 stats.rx_trunc2 + stats.rx_trunc3;
2911 ns->rx_missed_errors = 0;
2912
2913 /* detailed tx_errors */
2914 ns->tx_aborted_errors = 0;
2915 ns->tx_carrier_errors = 0;
2916 ns->tx_fifo_errors = 0;
2917 ns->tx_heartbeat_errors = 0;
2918 ns->tx_window_errors = 0;
2919
2920 ns->tx_errors = stats.tx_error_frames;
2921 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2922 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2923 return ns;
2924}
2925
2926static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2927{
060e0c75 2928 unsigned int mbox;
b8ff05a9
DM
2929 int ret = 0, prtad, devad;
2930 struct port_info *pi = netdev_priv(dev);
2931 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2932
2933 switch (cmd) {
2934 case SIOCGMIIPHY:
2935 if (pi->mdio_addr < 0)
2936 return -EOPNOTSUPP;
2937 data->phy_id = pi->mdio_addr;
2938 break;
2939 case SIOCGMIIREG:
2940 case SIOCSMIIREG:
2941 if (mdio_phy_id_is_c45(data->phy_id)) {
2942 prtad = mdio_phy_id_prtad(data->phy_id);
2943 devad = mdio_phy_id_devad(data->phy_id);
2944 } else if (data->phy_id < 32) {
2945 prtad = data->phy_id;
2946 devad = 0;
2947 data->reg_num &= 0x1f;
2948 } else
2949 return -EINVAL;
2950
b2612722 2951 mbox = pi->adapter->pf;
b8ff05a9 2952 if (cmd == SIOCGMIIREG)
060e0c75 2953 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
2954 data->reg_num, &data->val_out);
2955 else
060e0c75 2956 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
2957 data->reg_num, data->val_in);
2958 break;
2959 default:
2960 return -EOPNOTSUPP;
2961 }
2962 return ret;
2963}
2964
2965static void cxgb_set_rxmode(struct net_device *dev)
2966{
2967 /* unfortunately we can't return errors to the stack */
2968 set_rxmode(dev, -1, false);
2969}
2970
2971static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2972{
2973 int ret;
2974 struct port_info *pi = netdev_priv(dev);
2975
2976 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2977 return -EINVAL;
b2612722 2978 ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
060e0c75 2979 -1, -1, -1, true);
b8ff05a9
DM
2980 if (!ret)
2981 dev->mtu = new_mtu;
2982 return ret;
2983}
2984
2985static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2986{
2987 int ret;
2988 struct sockaddr *addr = p;
2989 struct port_info *pi = netdev_priv(dev);
2990
2991 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 2992 return -EADDRNOTAVAIL;
b8ff05a9 2993
b2612722 2994 ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
060e0c75 2995 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
2996 if (ret < 0)
2997 return ret;
2998
2999 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3000 pi->xact_addr_filt = ret;
3001 return 0;
3002}
3003
b8ff05a9
DM
3004#ifdef CONFIG_NET_POLL_CONTROLLER
3005static void cxgb_netpoll(struct net_device *dev)
3006{
3007 struct port_info *pi = netdev_priv(dev);
3008 struct adapter *adap = pi->adapter;
3009
3010 if (adap->flags & USING_MSIX) {
3011 int i;
3012 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3013
3014 for (i = pi->nqsets; i; i--, rx++)
3015 t4_sge_intr_msix(0, &rx->rspq);
3016 } else
3017 t4_intr_handler(adap)(0, adap);
3018}
3019#endif
3020
3021static const struct net_device_ops cxgb4_netdev_ops = {
3022 .ndo_open = cxgb_open,
3023 .ndo_stop = cxgb_close,
3024 .ndo_start_xmit = t4_eth_xmit,
688848b1 3025 .ndo_select_queue = cxgb_select_queue,
9be793bf 3026 .ndo_get_stats64 = cxgb_get_stats,
b8ff05a9
DM
3027 .ndo_set_rx_mode = cxgb_set_rxmode,
3028 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 3029 .ndo_set_features = cxgb_set_features,
b8ff05a9
DM
3030 .ndo_validate_addr = eth_validate_addr,
3031 .ndo_do_ioctl = cxgb_ioctl,
3032 .ndo_change_mtu = cxgb_change_mtu,
b8ff05a9
DM
3033#ifdef CONFIG_NET_POLL_CONTROLLER
3034 .ndo_poll_controller = cxgb_netpoll,
3035#endif
84a200b3
VP
3036#ifdef CONFIG_CHELSIO_T4_FCOE
3037 .ndo_fcoe_enable = cxgb_fcoe_enable,
3038 .ndo_fcoe_disable = cxgb_fcoe_disable,
3039#endif /* CONFIG_CHELSIO_T4_FCOE */
3a336cb1
HS
3040#ifdef CONFIG_NET_RX_BUSY_POLL
3041 .ndo_busy_poll = cxgb_busy_poll,
3042#endif
3043
b8ff05a9
DM
3044};
3045
3046void t4_fatal_err(struct adapter *adap)
3047{
f612b815 3048 t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
b8ff05a9
DM
3049 t4_intr_disable(adap);
3050 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3051}
3052
3053static void setup_memwin(struct adapter *adap)
3054{
b562fc37 3055 u32 nic_win_base = t4_get_util_window(adap);
b8ff05a9 3056
b562fc37 3057 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
636f9d37
VP
3058}
3059
3060static void setup_memwin_rdma(struct adapter *adap)
3061{
1ae970e0 3062 if (adap->vres.ocq.size) {
0abfd152
HS
3063 u32 start;
3064 unsigned int sz_kb;
1ae970e0 3065
0abfd152
HS
3066 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3067 start &= PCI_BASE_ADDRESS_MEM_MASK;
3068 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
1ae970e0
DM
3069 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3070 t4_write_reg(adap,
f061de42
HS
3071 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3072 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
1ae970e0 3073 t4_write_reg(adap,
f061de42 3074 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
1ae970e0
DM
3075 adap->vres.ocq.start);
3076 t4_read_reg(adap,
f061de42 3077 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
1ae970e0 3078 }
b8ff05a9
DM
3079}
3080
02b5fb8e
DM
3081static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3082{
3083 u32 v;
3084 int ret;
3085
3086 /* get device capabilities */
3087 memset(c, 0, sizeof(*c));
e2ac9628
HS
3088 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3089 FW_CMD_REQUEST_F | FW_CMD_READ_F);
ce91a923 3090 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
b2612722 3091 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
02b5fb8e
DM
3092 if (ret < 0)
3093 return ret;
3094
3095 /* select capabilities we'll be using */
3096 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3097 if (!vf_acls)
3098 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3099 else
3100 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3101 } else if (vf_acls) {
3102 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
3103 return -EINVAL;
3104 }
e2ac9628
HS
3105 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3106 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
b2612722 3107 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
02b5fb8e
DM
3108 if (ret < 0)
3109 return ret;
3110
b2612722 3111 ret = t4_config_glbl_rss(adap, adap->pf,
02b5fb8e 3112 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
b2e1a3f0
HS
3113 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3114 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
02b5fb8e
DM
3115 if (ret < 0)
3116 return ret;
3117
b2612722 3118 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
4b8e27a8
HS
3119 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3120 FW_CMD_CAP_PF);
02b5fb8e
DM
3121 if (ret < 0)
3122 return ret;
3123
3124 t4_sge_init(adap);
3125
02b5fb8e 3126 /* tweak some settings */
837e4a42 3127 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
0d804338 3128 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
837e4a42
HS
3129 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3130 v = t4_read_reg(adap, TP_PIO_DATA_A);
3131 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
060e0c75 3132
dca4faeb
VP
3133 /* first 4 Tx modulation queues point to consecutive Tx channels */
3134 adap->params.tp.tx_modq_map = 0xE4;
0d804338
HS
3135 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3136 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
dca4faeb
VP
3137
3138 /* associate each Tx modulation queue with consecutive Tx channels */
3139 v = 0x84218421;
837e4a42 3140 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
0d804338 3141 &v, 1, TP_TX_SCHED_HDR_A);
837e4a42 3142 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
0d804338 3143 &v, 1, TP_TX_SCHED_FIFO_A);
837e4a42 3144 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
0d804338 3145 &v, 1, TP_TX_SCHED_PCMD_A);
dca4faeb
VP
3146
3147#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3148 if (is_offload(adap)) {
0d804338
HS
3149 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3150 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3151 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3152 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3153 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3154 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3155 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3156 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3157 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3158 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
dca4faeb
VP
3159 }
3160
060e0c75 3161 /* get basic stuff going */
b2612722 3162 return t4_early_init(adap, adap->pf);
02b5fb8e
DM
3163}
3164
b8ff05a9
DM
3165/*
3166 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3167 */
3168#define MAX_ATIDS 8192U
3169
636f9d37
VP
3170/*
3171 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3172 *
3173 * If the firmware we're dealing with has Configuration File support, then
3174 * we use that to perform all configuration
3175 */
3176
3177/*
3178 * Tweak configuration based on module parameters, etc. Most of these have
3179 * defaults assigned to them by Firmware Configuration Files (if we're using
3180 * them) but need to be explicitly set if we're using hard-coded
3181 * initialization. But even in the case of using Firmware Configuration
3182 * Files, we'd like to expose the ability to change these via module
3183 * parameters so these are essentially common tweaks/settings for
3184 * Configuration Files and hard-coded initialization ...
3185 */
3186static int adap_init0_tweaks(struct adapter *adapter)
3187{
3188 /*
3189 * Fix up various Host-Dependent Parameters like Page Size, Cache
3190 * Line Size, etc. The firmware default is for a 4KB Page Size and
3191 * 64B Cache Line Size ...
3192 */
3193 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3194
3195 /*
3196 * Process module parameters which affect early initialization.
3197 */
3198 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3199 dev_err(&adapter->pdev->dev,
3200 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3201 rx_dma_offset);
3202 rx_dma_offset = 2;
3203 }
f612b815
HS
3204 t4_set_reg_field(adapter, SGE_CONTROL_A,
3205 PKTSHIFT_V(PKTSHIFT_M),
3206 PKTSHIFT_V(rx_dma_offset));
636f9d37
VP
3207
3208 /*
3209 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3210 * adds the pseudo header itself.
3211 */
837e4a42
HS
3212 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3213 CSUM_HAS_PSEUDO_HDR_F, 0);
636f9d37
VP
3214
3215 return 0;
3216}
3217
01b69614
HS
3218/* 10Gb/s-BT PHY Support. Chip-external 10Gb/s-BT PHYs are complex chips
3219 * unto themselves and they contain their own firmware to perform their
3220 * tasks ...
3221 */
3222static int phy_aq1202_version(const u8 *phy_fw_data,
3223 size_t phy_fw_size)
3224{
3225 int offset;
3226
3227 /* At offset 0x8 you're looking for the primary image's
3228 * starting offset which is 3 Bytes wide
3229 *
3230 * At offset 0xa of the primary image, you look for the offset
3231 * of the DRAM segment which is 3 Bytes wide.
3232 *
3233 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
3234 * wide
3235 */
3236 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3237 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3238 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3239
3240 offset = le24(phy_fw_data + 0x8) << 12;
3241 offset = le24(phy_fw_data + offset + 0xa);
3242 return be16(phy_fw_data + offset + 0x27e);
3243
3244 #undef be16
3245 #undef le16
3246 #undef le24
3247}
3248
3249static struct info_10gbt_phy_fw {
3250 unsigned int phy_fw_id; /* PCI Device ID */
3251 char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
3252 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3253 int phy_flash; /* Has FLASH for PHY Firmware */
3254} phy_info_array[] = {
3255 {
3256 PHY_AQ1202_DEVICEID,
3257 PHY_AQ1202_FIRMWARE,
3258 phy_aq1202_version,
3259 1,
3260 },
3261 {
3262 PHY_BCM84834_DEVICEID,
3263 PHY_BCM84834_FIRMWARE,
3264 NULL,
3265 0,
3266 },
3267 { 0, NULL, NULL },
3268};
3269
3270static struct info_10gbt_phy_fw *find_phy_info(int devid)
3271{
3272 int i;
3273
3274 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3275 if (phy_info_array[i].phy_fw_id == devid)
3276 return &phy_info_array[i];
3277 }
3278 return NULL;
3279}
3280
3281/* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
3282 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
3283 * we return a negative error number. If we transfer new firmware we return 1
3284 * (from t4_load_phy_fw()). If we don't do anything we return 0.
3285 */
3286static int adap_init0_phy(struct adapter *adap)
3287{
3288 const struct firmware *phyf;
3289 int ret;
3290 struct info_10gbt_phy_fw *phy_info;
3291
3292 /* Use the device ID to determine which PHY file to flash.
3293 */
3294 phy_info = find_phy_info(adap->pdev->device);
3295 if (!phy_info) {
3296 dev_warn(adap->pdev_dev,
3297 "No PHY Firmware file found for this PHY\n");
3298 return -EOPNOTSUPP;
3299 }
3300
3301 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3302 * use that. The adapter firmware provides us with a memory buffer
3303 * where we can load a PHY firmware file from the host if we want to
3304 * override the PHY firmware File in flash.
3305 */
3306 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
3307 adap->pdev_dev);
3308 if (ret < 0) {
3309 /* For adapters without FLASH attached to the PHY for their
3310 * firmware, it's obviously a fatal error if we can't get the
3311 * firmware to the adapter. For adapters with PHY firmware
3312 * FLASH storage, it's worth a warning if we can't find the
3313 * PHY Firmware but we'll neuter the error ...
3314 */
3315 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3316 "/lib/firmware/%s, error %d\n",
3317 phy_info->phy_fw_file, -ret);
3318 if (phy_info->phy_flash) {
3319 int cur_phy_fw_ver = 0;
3320
3321 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3322 dev_warn(adap->pdev_dev, "continuing with on-adapter "
3323 "FLASH copy, version %#x\n", cur_phy_fw_ver);
3324 ret = 0;
3325 }
3326
3327 return ret;
3328 }
3329
3330 /* Load PHY Firmware onto adapter.
3331 */
3332 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3333 phy_info->phy_fw_version,
3334 (u8 *)phyf->data, phyf->size);
3335 if (ret < 0)
3336 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3337 -ret);
3338 else if (ret > 0) {
3339 int new_phy_fw_ver = 0;
3340
3341 if (phy_info->phy_fw_version)
3342 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3343 phyf->size);
3344 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3345 "Firmware /lib/firmware/%s, version %#x\n",
3346 phy_info->phy_fw_file, new_phy_fw_ver);
3347 }
3348
3349 release_firmware(phyf);
3350
3351 return ret;
3352}
3353
636f9d37
VP
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}
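
	/* Worked example (illustrative only): for a hypothetical 1027-byte
	 * Configuration File, size = 1027 & ~0x3 = 1024 and
	 * resid = 1027 & 0x3 = 3, so the first 1024 bytes go out in one
	 * t4_memory_rw() and the final word is rebuilt from bytes
	 * 1024..1026 plus one zero pad byte before the 4-byte write at
	 * maddr + size.
	 */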

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}

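/* Illustrative walk-through: for a T5 adapter, find_fw_info() returns the
 * CHELSIO_T5 entry above, so the firmware-upgrade logic in adap_init0()
 * compares the header read from FLASH against FW_VERSION(T5) and, on a
 * mismatch, considers the /lib/firmware image named by FW5_FNAME.
 */
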
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

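	/* Illustrative example: a port_vec of 0x5 (binary 101) means the
	 * adapter exposes ports 0 and 2, so hweight32(0x5) yields
	 * nports = 2.
	 */
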
	/* If the firmware is initialized already, emit a simple note to that
	 * effect.  Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

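	/* Illustrative expansion: FW_PARAM_PFVF(L2T_START) above packs the
	 * FW_PARAMS_MNEM_PFVF mnemonic together with the
	 * FW_PARAMS_PARAM_PFVF_L2T_START parameter index into one 32-bit
	 * query id; the firmware answers in val[] in the same order the
	 * ids were placed in params[].
	 */
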
	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info.  Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

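	/* Illustrative sizing (hypothetical values): if the firmware
	 * reported EQ_START = 256 (read earlier into egr_start) and
	 * EQ_END = 511, then egr_sz = 511 - 256 + 1 = 256, and egr_map
	 * gets one slot per possible egress qid in that window.
	 */
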
	/* Allocate the memory for the various egress queue bitmaps,
	 * i.e. starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif

	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connections through firmware work requests.
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region.  Divide the available
		 * filter region into two parts: regular filters get 1/3rd
		 * and server filters get the remaining 2/3rd.  This split
		 * is only enabled when the workaround path is enabled.
		 * 1. Regular filters.
		 * 2. Server filters: these are special filters which are
		 *    used to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					 DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
					adap->tids.ftid_base;
		}
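
		/* Worked example (hypothetical): with nftids = 3000,
		 * DIV_ROUND_UP(3000, 3) = 1000, so sftid_base ends up 1000
		 * entries past ftid_base, nsftids = 2000 (the server 2/3rd)
		 * and nftids is rewritten to 1000 (the regular 1/3rd).
		 */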
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with
	 * EIO, the firmware is either not operating within its spec or
	 * something catastrophic happened to the HW/FW, so stop issuing
	 * commands.
	 */
bye:
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
				"device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};

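/* Recovery flow (as driven by the PCI error-recovery core, sketched here
 * for orientation): eeh_err_detected() quiesces the adapter, then
 * eeh_slot_reset() re-enables the device and re-contacts the firmware,
 * and finally eeh_resume() restarts any interfaces that were running.
 */
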
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */
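
	/* Illustrative example (hypothetical numbers): with MAX_ETH_QSETS
	 * of 32 on a card with four 10G ports, q10g = (32 - 0) / 4 = 8
	 * queue sets per port, then capped by
	 * netif_get_num_default_rss_queues() (8 by default), so each port
	 * ends up with up to 8 queue sets.
	 */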

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		/* Try and allow at least 1 CIQ per cpu rounding down
		 * to the number of ports, with a minimum of 1 per port.
		 * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
		 * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
		 * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
		 */
		s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
			      adap->params.nports;
		s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;

	entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < MAX_INGQ + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev,
			 "not enough MSI-X vectors left, not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		if (allocated < want) {
			s->rdmaqs = nchan;
			s->rdmaciqs = nchan;
		}

		/* leftovers go to OFLD */
		i = allocated - EXTRA_VECS - s->max_ethqsets -
		    s->rdmaqs - s->rdmaciqs;
		s->ofldqsets = (i / nchan) * nchan;  /* round down */
	}
	for (i = 0; i < allocated; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	kfree(entries);
	return 0;
}

#undef EXTRA_VECS

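/* Worked example (hypothetical, non-DCB, 2-port offload-capable card):
 * need = 2 ports + 2 extra + 3*2 ULD vectors = 10.  If exactly 10 vectors
 * are granted, i = 10 - 2 - 6 = 2, so max_ethqsets drops to 2 and
 * reduce_ethqs(adap, 2) trims the per-port queue sets; rdmaqs and
 * rdmaciqs each fall back to nchan = 2, leaving 10 - 2 - 2 - 2 - 2 = 2
 * vectors for OFLD, i.e. ofldqsets = (2 / 2) * 2 = 2.
 */
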
static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}

static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}

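/* Hypothetical example of the resulting log line (exact fields depend on
 * the adapter, link capabilities and slot):
 *   eth0: Chelsio T580-CR rev 0 10G/40GBASE-QSFP RNIC PCIe x8 8 GT/s MSI-X
 */
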
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B in size.  Write coalescing is
		 * enabled only when the SGE_EGRESS_QUEUES_PER_PAGE_PF
		 * register value for this PF is no more than the number
		 * of 128B segments that can be accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

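	/* Illustrative check: with 4 KB pages, num_seg = 4096 / 128 = 32
	 * segments, so the write-combined BAR2 mapping above is only set
	 * up when the configured queues-per-page value (qpp) is 32 or
	 * less.
	 */
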
	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
					  adapter->clipt_end);
	if (!adapter->clipt) {
		/* We tolerate a lack of clip_table, giving up
		 * some functionality
		 */
		dev_warn(&pdev->dev,
			 "could not allocate Clip table, continuing\n");
		adapter->params.offload = 0;
	}
#endif
	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);