]> git.ipfire.org Git - thirdparty/linux.git/blame - drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
cxgb4: Assign filter server TIDs properly
[thirdparty/linux.git] / drivers / net / ethernet / chelsio / cxgb4 / cxgb4_main.c
CommitLineData
b8ff05a9
DM
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
01789349 44#include <linux/if.h>
b8ff05a9
DM
45#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
01bcca68 63#include <net/addrconf.h>
b8ff05a9
DM
64#include <asm/uaccess.h>
65
66#include "cxgb4.h"
67#include "t4_regs.h"
68#include "t4_msg.h"
69#include "t4fw_api.h"
70#include "l2t.h"
71
01bcca68
VP
72#include <../drivers/net/bonding/bonding.h>
73
74#ifdef DRV_VERSION
75#undef DRV_VERSION
76#endif
3a7f8554
SR
77#define DRV_VERSION "2.0.0-ko"
78#define DRV_DESC "Chelsio T4/T5 Network Driver"
b8ff05a9
DM
79
80/*
81 * Max interrupt hold-off timer value in us. Queues fall back to this value
82 * under extreme memory pressure so it's largish to give the system time to
83 * recover.
84 */
85#define MAX_SGE_TIMERVAL 200U
86
7ee9ff94 87enum {
13ee15d3
VP
88 /*
89 * Physical Function provisioning constants.
90 */
91 PFRES_NVI = 4, /* # of Virtual Interfaces */
92 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
93 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
94 */
95 PFRES_NEQ = 256, /* # of egress queues */
96 PFRES_NIQ = 0, /* # of ingress queues */
97 PFRES_TC = 0, /* PCI-E traffic class */
98 PFRES_NEXACTF = 128, /* # of exact MPS filters */
99
100 PFRES_R_CAPS = FW_CMD_CAP_PF,
101 PFRES_WX_CAPS = FW_CMD_CAP_PF,
102
103#ifdef CONFIG_PCI_IOV
104 /*
105 * Virtual Function provisioning constants. We need two extra Ingress
106 * Queues with Interrupt capability to serve as the VF's Firmware
107 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
108 * neither will have Free Lists associated with them). For each
109 * Ethernet/Control Egress Queue and for each Free List, we need an
110 * Egress Context.
111 */
7ee9ff94
CL
112 VFRES_NPORTS = 1, /* # of "ports" per VF */
113 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
114
115 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
116 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
117 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
7ee9ff94 118 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
13ee15d3 119 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
7ee9ff94
CL
120 VFRES_TC = 0, /* PCI-E traffic class */
121 VFRES_NEXACTF = 16, /* # of exact MPS filters */
122
123 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
124 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
13ee15d3 125#endif
7ee9ff94
CL
126};
127
128/*
129 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
130 * static and likely not to be useful in the long run. We really need to
131 * implement some form of persistent configuration which the firmware
132 * controls.
133 */
134static unsigned int pfvfres_pmask(struct adapter *adapter,
135 unsigned int pf, unsigned int vf)
136{
137 unsigned int portn, portvec;
138
139 /*
140 * Give PF's access to all of the ports.
141 */
142 if (vf == 0)
143 return FW_PFVF_CMD_PMASK_MASK;
144
145 /*
146 * For VFs, we'll assign them access to the ports based purely on the
147 * PF. We assign active ports in order, wrapping around if there are
148 * fewer active ports than PFs: e.g. active port[pf % nports].
149 * Unfortunately the adapter's port_info structs haven't been
150 * initialized yet so we have to compute this.
151 */
152 if (adapter->params.nports == 0)
153 return 0;
154
155 portn = pf % adapter->params.nports;
156 portvec = adapter->params.portvec;
157 for (;;) {
158 /*
159 * Isolate the lowest set bit in the port vector. If we're at
160 * the port number that we want, return that as the pmask.
161 * otherwise mask that bit out of the port vector and
162 * decrement our port number ...
163 */
164 unsigned int pmask = portvec ^ (portvec & (portvec-1));
165 if (portn == 0)
166 return pmask;
167 portn--;
168 portvec &= ~pmask;
169 }
170 /*NOTREACHED*/
171}
7ee9ff94 172
b8ff05a9
DM
173enum {
174 MAX_TXQ_ENTRIES = 16384,
175 MAX_CTRL_TXQ_ENTRIES = 1024,
176 MAX_RSPQ_ENTRIES = 16384,
177 MAX_RX_BUFFERS = 16384,
178 MIN_TXQ_ENTRIES = 32,
179 MIN_CTRL_TXQ_ENTRIES = 32,
180 MIN_RSPQ_ENTRIES = 128,
181 MIN_FL_ENTRIES = 16
182};
183
f2b7e78d
VP
184/* Host shadow copy of ingress filter entry. This is in host native format
185 * and doesn't match the ordering or bit order, etc. of the hardware of the
186 * firmware command. The use of bit-field structure elements is purely to
187 * remind ourselves of the field size limitations and save memory in the case
188 * where the filter table is large.
189 */
190struct filter_entry {
191 /* Administrative fields for filter.
192 */
193 u32 valid:1; /* filter allocated and valid */
194 u32 locked:1; /* filter is administratively locked */
195
196 u32 pending:1; /* filter action is pending firmware reply */
197 u32 smtidx:8; /* Source MAC Table index for smac */
198 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
199
200 /* The filter itself. Most of this is a straight copy of information
201 * provided by the extended ioctl(). Some fields are translated to
202 * internal forms -- for instance the Ingress Queue ID passed in from
203 * the ioctl() is translated into the Absolute Ingress Queue ID.
204 */
205 struct ch_filter_specification fs;
206};
207
b8ff05a9
DM
208#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
209 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
210 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
211
060e0c75 212#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
b8ff05a9
DM
213
214static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
060e0c75 215 CH_DEVICE(0xa000, 0), /* PE10K */
ccea790e
DM
216 CH_DEVICE(0x4001, -1),
217 CH_DEVICE(0x4002, -1),
218 CH_DEVICE(0x4003, -1),
219 CH_DEVICE(0x4004, -1),
220 CH_DEVICE(0x4005, -1),
221 CH_DEVICE(0x4006, -1),
222 CH_DEVICE(0x4007, -1),
223 CH_DEVICE(0x4008, -1),
224 CH_DEVICE(0x4009, -1),
225 CH_DEVICE(0x400a, -1),
226 CH_DEVICE(0x4401, 4),
227 CH_DEVICE(0x4402, 4),
228 CH_DEVICE(0x4403, 4),
229 CH_DEVICE(0x4404, 4),
230 CH_DEVICE(0x4405, 4),
231 CH_DEVICE(0x4406, 4),
232 CH_DEVICE(0x4407, 4),
233 CH_DEVICE(0x4408, 4),
234 CH_DEVICE(0x4409, 4),
235 CH_DEVICE(0x440a, 4),
f637d577
VP
236 CH_DEVICE(0x440d, 4),
237 CH_DEVICE(0x440e, 4),
9ef603a0
VP
238 CH_DEVICE(0x5001, 4),
239 CH_DEVICE(0x5002, 4),
240 CH_DEVICE(0x5003, 4),
241 CH_DEVICE(0x5004, 4),
242 CH_DEVICE(0x5005, 4),
243 CH_DEVICE(0x5006, 4),
244 CH_DEVICE(0x5007, 4),
245 CH_DEVICE(0x5008, 4),
246 CH_DEVICE(0x5009, 4),
247 CH_DEVICE(0x500A, 4),
248 CH_DEVICE(0x500B, 4),
249 CH_DEVICE(0x500C, 4),
250 CH_DEVICE(0x500D, 4),
251 CH_DEVICE(0x500E, 4),
252 CH_DEVICE(0x500F, 4),
253 CH_DEVICE(0x5010, 4),
254 CH_DEVICE(0x5011, 4),
255 CH_DEVICE(0x5012, 4),
256 CH_DEVICE(0x5013, 4),
257 CH_DEVICE(0x5401, 4),
258 CH_DEVICE(0x5402, 4),
259 CH_DEVICE(0x5403, 4),
260 CH_DEVICE(0x5404, 4),
261 CH_DEVICE(0x5405, 4),
262 CH_DEVICE(0x5406, 4),
263 CH_DEVICE(0x5407, 4),
264 CH_DEVICE(0x5408, 4),
265 CH_DEVICE(0x5409, 4),
266 CH_DEVICE(0x540A, 4),
267 CH_DEVICE(0x540B, 4),
268 CH_DEVICE(0x540C, 4),
269 CH_DEVICE(0x540D, 4),
270 CH_DEVICE(0x540E, 4),
271 CH_DEVICE(0x540F, 4),
272 CH_DEVICE(0x5410, 4),
273 CH_DEVICE(0x5411, 4),
274 CH_DEVICE(0x5412, 4),
275 CH_DEVICE(0x5413, 4),
b8ff05a9
DM
276 { 0, }
277};
278
16e47624 279#define FW4_FNAME "cxgb4/t4fw.bin"
0a57a536 280#define FW5_FNAME "cxgb4/t5fw.bin"
16e47624 281#define FW4_CFNAME "cxgb4/t4-config.txt"
0a57a536 282#define FW5_CFNAME "cxgb4/t5-config.txt"
b8ff05a9
DM
283
284MODULE_DESCRIPTION(DRV_DESC);
285MODULE_AUTHOR("Chelsio Communications");
286MODULE_LICENSE("Dual BSD/GPL");
287MODULE_VERSION(DRV_VERSION);
288MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
16e47624 289MODULE_FIRMWARE(FW4_FNAME);
0a57a536 290MODULE_FIRMWARE(FW5_FNAME);
b8ff05a9 291
636f9d37
VP
292/*
293 * Normally we're willing to become the firmware's Master PF but will be happy
294 * if another PF has already become the Master and initialized the adapter.
295 * Setting "force_init" will cause this driver to forcibly establish itself as
296 * the Master PF and initialize the adapter.
297 */
298static uint force_init;
299
300module_param(force_init, uint, 0644);
301MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
302
13ee15d3
VP
303/*
304 * Normally if the firmware we connect to has Configuration File support, we
305 * use that and only fall back to the old Driver-based initialization if the
306 * Configuration File fails for some reason. If force_old_init is set, then
307 * we'll always use the old Driver-based initialization sequence.
308 */
309static uint force_old_init;
310
311module_param(force_old_init, uint, 0644);
312MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
313
b8ff05a9
DM
314static int dflt_msg_enable = DFLT_MSG_ENABLE;
315
316module_param(dflt_msg_enable, int, 0644);
317MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
318
319/*
320 * The driver uses the best interrupt scheme available on a platform in the
321 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
322 * of these schemes the driver may consider as follows:
323 *
324 * msi = 2: choose from among all three options
325 * msi = 1: only consider MSI and INTx interrupts
326 * msi = 0: force INTx interrupts
327 */
328static int msi = 2;
329
330module_param(msi, int, 0644);
331MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
332
333/*
334 * Queue interrupt hold-off timer values. Queues default to the first of these
335 * upon creation.
336 */
337static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
338
339module_param_array(intr_holdoff, uint, NULL, 0644);
340MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
341 "0..4 in microseconds");
342
343static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
344
345module_param_array(intr_cnt, uint, NULL, 0644);
346MODULE_PARM_DESC(intr_cnt,
347 "thresholds 1..3 for queue interrupt packet counters");
348
636f9d37
VP
349/*
350 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
351 * offset by 2 bytes in order to have the IP headers line up on 4-byte
352 * boundaries. This is a requirement for many architectures which will throw
353 * a machine check fault if an attempt is made to access one of the 4-byte IP
354 * header fields on a non-4-byte boundary. And it's a major performance issue
355 * even on some architectures which allow it like some implementations of the
356 * x86 ISA. However, some architectures don't mind this and for some very
357 * edge-case performance sensitive applications (like forwarding large volumes
358 * of small packets), setting this DMA offset to 0 will decrease the number of
359 * PCI-E Bus transfers enough to measurably affect performance.
360 */
361static int rx_dma_offset = 2;
362
eb939922 363static bool vf_acls;
b8ff05a9
DM
364
365#ifdef CONFIG_PCI_IOV
366module_param(vf_acls, bool, 0644);
367MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
368
7d6727cf
SR
369/* Configure the number of PCI-E Virtual Function which are to be instantiated
370 * on SR-IOV Capable Physical Functions.
0a57a536 371 */
7d6727cf 372static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
b8ff05a9
DM
373
374module_param_array(num_vf, uint, NULL, 0644);
7d6727cf 375MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
b8ff05a9
DM
376#endif
377
13ee15d3
VP
378/*
379 * The filter TCAM has a fixed portion and a variable portion. The fixed
380 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
381 * ports. The variable portion is 36 bits which can include things like Exact
382 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
383 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
384 * far exceed the 36-bit budget for this "compressed" header portion of the
385 * filter. Thus, we have a scarce resource which must be carefully managed.
386 *
387 * By default we set this up to mostly match the set of filter matching
388 * capabilities of T3 but with accommodations for some of T4's more
389 * interesting features:
390 *
391 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
392 * [Inner] VLAN (17), Port (3), FCoE (1) }
393 */
394enum {
395 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
396 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
397 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
398};
399
400static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
401
f2b7e78d
VP
402module_param(tp_vlan_pri_map, uint, 0644);
403MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
404
b8ff05a9
DM
405static struct dentry *cxgb4_debugfs_root;
406
407static LIST_HEAD(adapter_list);
408static DEFINE_MUTEX(uld_mutex);
01bcca68
VP
409/* Adapter list to be accessed from atomic context */
410static LIST_HEAD(adap_rcu_list);
411static DEFINE_SPINLOCK(adap_rcu_lock);
b8ff05a9
DM
412static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
413static const char *uld_str[] = { "RDMA", "iSCSI" };
414
415static void link_report(struct net_device *dev)
416{
417 if (!netif_carrier_ok(dev))
418 netdev_info(dev, "link down\n");
419 else {
420 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
421
422 const char *s = "10Mbps";
423 const struct port_info *p = netdev_priv(dev);
424
425 switch (p->link_cfg.speed) {
426 case SPEED_10000:
427 s = "10Gbps";
428 break;
429 case SPEED_1000:
430 s = "1000Mbps";
431 break;
432 case SPEED_100:
433 s = "100Mbps";
434 break;
435 }
436
437 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
438 fc[p->link_cfg.fc]);
439 }
440}
441
442void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
443{
444 struct net_device *dev = adapter->port[port_id];
445
446 /* Skip changes from disabled ports. */
447 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
448 if (link_stat)
449 netif_carrier_on(dev);
450 else
451 netif_carrier_off(dev);
452
453 link_report(dev);
454 }
455}
456
457void t4_os_portmod_changed(const struct adapter *adap, int port_id)
458{
459 static const char *mod_str[] = {
a0881cab 460 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
b8ff05a9
DM
461 };
462
463 const struct net_device *dev = adap->port[port_id];
464 const struct port_info *pi = netdev_priv(dev);
465
466 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
467 netdev_info(dev, "port module unplugged\n");
a0881cab 468 else if (pi->mod_type < ARRAY_SIZE(mod_str))
b8ff05a9
DM
469 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
470}
471
472/*
473 * Configure the exact and hash address filters to handle a port's multicast
474 * and secondary unicast MAC addresses.
475 */
476static int set_addr_filters(const struct net_device *dev, bool sleep)
477{
478 u64 mhash = 0;
479 u64 uhash = 0;
480 bool free = true;
481 u16 filt_idx[7];
482 const u8 *addr[7];
483 int ret, naddr = 0;
b8ff05a9
DM
484 const struct netdev_hw_addr *ha;
485 int uc_cnt = netdev_uc_count(dev);
4a35ecf8 486 int mc_cnt = netdev_mc_count(dev);
b8ff05a9 487 const struct port_info *pi = netdev_priv(dev);
060e0c75 488 unsigned int mb = pi->adapter->fn;
b8ff05a9
DM
489
490 /* first do the secondary unicast addresses */
491 netdev_for_each_uc_addr(ha, dev) {
492 addr[naddr++] = ha->addr;
493 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
060e0c75 494 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
b8ff05a9
DM
495 naddr, addr, filt_idx, &uhash, sleep);
496 if (ret < 0)
497 return ret;
498
499 free = false;
500 naddr = 0;
501 }
502 }
503
504 /* next set up the multicast addresses */
4a35ecf8
DM
505 netdev_for_each_mc_addr(ha, dev) {
506 addr[naddr++] = ha->addr;
507 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
060e0c75 508 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
b8ff05a9
DM
509 naddr, addr, filt_idx, &mhash, sleep);
510 if (ret < 0)
511 return ret;
512
513 free = false;
514 naddr = 0;
515 }
516 }
517
060e0c75 518 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
b8ff05a9
DM
519 uhash | mhash, sleep);
520}
521
3069ee9b
VP
522int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
523module_param(dbfifo_int_thresh, int, 0644);
524MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
525
404d9e3f
VP
526/*
527 * usecs to sleep while draining the dbfifo
528 */
529static int dbfifo_drain_delay = 1000;
3069ee9b
VP
530module_param(dbfifo_drain_delay, int, 0644);
531MODULE_PARM_DESC(dbfifo_drain_delay,
532 "usecs to sleep while draining the dbfifo");
533
b8ff05a9
DM
534/*
535 * Set Rx properties of a port, such as promiscruity, address filters, and MTU.
536 * If @mtu is -1 it is left unchanged.
537 */
538static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
539{
540 int ret;
541 struct port_info *pi = netdev_priv(dev);
542
543 ret = set_addr_filters(dev, sleep_ok);
544 if (ret == 0)
060e0c75 545 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
b8ff05a9 546 (dev->flags & IFF_PROMISC) ? 1 : 0,
f8f5aafa 547 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
b8ff05a9
DM
548 sleep_ok);
549 return ret;
550}
551
3069ee9b
VP
552static struct workqueue_struct *workq;
553
b8ff05a9
DM
554/**
555 * link_start - enable a port
556 * @dev: the port to enable
557 *
558 * Performs the MAC and PHY actions needed to enable a port.
559 */
560static int link_start(struct net_device *dev)
561{
562 int ret;
563 struct port_info *pi = netdev_priv(dev);
060e0c75 564 unsigned int mb = pi->adapter->fn;
b8ff05a9
DM
565
566 /*
567 * We do not set address filters and promiscuity here, the stack does
568 * that step explicitly.
569 */
060e0c75 570 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
f646968f 571 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
b8ff05a9 572 if (ret == 0) {
060e0c75 573 ret = t4_change_mac(pi->adapter, mb, pi->viid,
b8ff05a9 574 pi->xact_addr_filt, dev->dev_addr, true,
b6bd29e7 575 true);
b8ff05a9
DM
576 if (ret >= 0) {
577 pi->xact_addr_filt = ret;
578 ret = 0;
579 }
580 }
581 if (ret == 0)
060e0c75
DM
582 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
583 &pi->link_cfg);
b8ff05a9 584 if (ret == 0)
060e0c75 585 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
b8ff05a9
DM
586 return ret;
587}
588
f2b7e78d
VP
589/* Clear a filter and release any of its resources that we own. This also
590 * clears the filter's "pending" status.
591 */
592static void clear_filter(struct adapter *adap, struct filter_entry *f)
593{
594 /* If the new or old filter have loopback rewriteing rules then we'll
595 * need to free any existing Layer Two Table (L2T) entries of the old
596 * filter rule. The firmware will handle freeing up any Source MAC
597 * Table (SMT) entries used for rewriting Source MAC Addresses in
598 * loopback rules.
599 */
600 if (f->l2t)
601 cxgb4_l2t_release(f->l2t);
602
603 /* The zeroing of the filter rule below clears the filter valid,
604 * pending, locked flags, l2t pointer, etc. so it's all we need for
605 * this operation.
606 */
607 memset(f, 0, sizeof(*f));
608}
609
610/* Handle a filter write/deletion reply.
611 */
612static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
613{
614 unsigned int idx = GET_TID(rpl);
615 unsigned int nidx = idx - adap->tids.ftid_base;
616 unsigned int ret;
617 struct filter_entry *f;
618
619 if (idx >= adap->tids.ftid_base && nidx <
620 (adap->tids.nftids + adap->tids.nsftids)) {
621 idx = nidx;
622 ret = GET_TCB_COOKIE(rpl->cookie);
623 f = &adap->tids.ftid_tab[idx];
624
625 if (ret == FW_FILTER_WR_FLT_DELETED) {
626 /* Clear the filter when we get confirmation from the
627 * hardware that the filter has been deleted.
628 */
629 clear_filter(adap, f);
630 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
631 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
632 idx);
633 clear_filter(adap, f);
634 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
635 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
636 f->pending = 0; /* asynchronous setup completed */
637 f->valid = 1;
638 } else {
639 /* Something went wrong. Issue a warning about the
640 * problem and clear everything out.
641 */
642 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
643 idx, ret);
644 clear_filter(adap, f);
645 }
646 }
647}
648
649/* Response queue handler for the FW event queue.
b8ff05a9
DM
650 */
651static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
652 const struct pkt_gl *gl)
653{
654 u8 opcode = ((const struct rss_header *)rsp)->opcode;
655
656 rsp++; /* skip RSS header */
b407a4a9
VP
657
658 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
659 */
660 if (unlikely(opcode == CPL_FW4_MSG &&
661 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
662 rsp++;
663 opcode = ((const struct rss_header *)rsp)->opcode;
664 rsp++;
665 if (opcode != CPL_SGE_EGR_UPDATE) {
666 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
667 , opcode);
668 goto out;
669 }
670 }
671
b8ff05a9
DM
672 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
673 const struct cpl_sge_egr_update *p = (void *)rsp;
674 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
e46dab4d 675 struct sge_txq *txq;
b8ff05a9 676
e46dab4d 677 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
b8ff05a9 678 txq->restarts++;
e46dab4d 679 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
b8ff05a9
DM
680 struct sge_eth_txq *eq;
681
682 eq = container_of(txq, struct sge_eth_txq, q);
683 netif_tx_wake_queue(eq->txq);
684 } else {
685 struct sge_ofld_txq *oq;
686
687 oq = container_of(txq, struct sge_ofld_txq, q);
688 tasklet_schedule(&oq->qresume_tsk);
689 }
690 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
691 const struct cpl_fw6_msg *p = (void *)rsp;
692
693 if (p->type == 0)
694 t4_handle_fw_rpl(q->adap, p->data);
695 } else if (opcode == CPL_L2T_WRITE_RPL) {
696 const struct cpl_l2t_write_rpl *p = (void *)rsp;
697
698 do_l2t_write_rpl(q->adap, p);
f2b7e78d
VP
699 } else if (opcode == CPL_SET_TCB_RPL) {
700 const struct cpl_set_tcb_rpl *p = (void *)rsp;
701
702 filter_rpl(q->adap, p);
b8ff05a9
DM
703 } else
704 dev_err(q->adap->pdev_dev,
705 "unexpected CPL %#x on FW event queue\n", opcode);
b407a4a9 706out:
b8ff05a9
DM
707 return 0;
708}
709
710/**
711 * uldrx_handler - response queue handler for ULD queues
712 * @q: the response queue that received the packet
713 * @rsp: the response queue descriptor holding the offload message
714 * @gl: the gather list of packet fragments
715 *
716 * Deliver an ingress offload packet to a ULD. All processing is done by
717 * the ULD, we just maintain statistics.
718 */
719static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
720 const struct pkt_gl *gl)
721{
722 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
723
b407a4a9
VP
724 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
725 */
726 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
727 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
728 rsp += 2;
729
b8ff05a9
DM
730 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
731 rxq->stats.nomem++;
732 return -1;
733 }
734 if (gl == NULL)
735 rxq->stats.imm++;
736 else if (gl == CXGB4_MSG_AN)
737 rxq->stats.an++;
738 else
739 rxq->stats.pkts++;
740 return 0;
741}
742
743static void disable_msi(struct adapter *adapter)
744{
745 if (adapter->flags & USING_MSIX) {
746 pci_disable_msix(adapter->pdev);
747 adapter->flags &= ~USING_MSIX;
748 } else if (adapter->flags & USING_MSI) {
749 pci_disable_msi(adapter->pdev);
750 adapter->flags &= ~USING_MSI;
751 }
752}
753
754/*
755 * Interrupt handler for non-data events used with MSI-X.
756 */
757static irqreturn_t t4_nondata_intr(int irq, void *cookie)
758{
759 struct adapter *adap = cookie;
760
761 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
762 if (v & PFSW) {
763 adap->swintr = 1;
764 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
765 }
766 t4_slow_intr_handler(adap);
767 return IRQ_HANDLED;
768}
769
770/*
771 * Name the MSI-X interrupts.
772 */
773static void name_msix_vecs(struct adapter *adap)
774{
ba27816c 775 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
b8ff05a9
DM
776
777 /* non-data interrupts */
b1a3c2b6 778 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
b8ff05a9
DM
779
780 /* FW events */
b1a3c2b6
DM
781 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
782 adap->port[0]->name);
b8ff05a9
DM
783
784 /* Ethernet queues */
785 for_each_port(adap, j) {
786 struct net_device *d = adap->port[j];
787 const struct port_info *pi = netdev_priv(d);
788
ba27816c 789 for (i = 0; i < pi->nqsets; i++, msi_idx++)
b8ff05a9
DM
790 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
791 d->name, i);
b8ff05a9
DM
792 }
793
794 /* offload queues */
ba27816c
DM
795 for_each_ofldrxq(&adap->sge, i)
796 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
b1a3c2b6 797 adap->port[0]->name, i);
ba27816c
DM
798
799 for_each_rdmarxq(&adap->sge, i)
800 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
b1a3c2b6 801 adap->port[0]->name, i);
b8ff05a9
DM
802}
803
804static int request_msix_queue_irqs(struct adapter *adap)
805{
806 struct sge *s = &adap->sge;
404d9e3f 807 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
b8ff05a9
DM
808
809 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
810 adap->msix_info[1].desc, &s->fw_evtq);
811 if (err)
812 return err;
813
814 for_each_ethrxq(s, ethqidx) {
404d9e3f
VP
815 err = request_irq(adap->msix_info[msi_index].vec,
816 t4_sge_intr_msix, 0,
817 adap->msix_info[msi_index].desc,
b8ff05a9
DM
818 &s->ethrxq[ethqidx].rspq);
819 if (err)
820 goto unwind;
404d9e3f 821 msi_index++;
b8ff05a9
DM
822 }
823 for_each_ofldrxq(s, ofldqidx) {
404d9e3f
VP
824 err = request_irq(adap->msix_info[msi_index].vec,
825 t4_sge_intr_msix, 0,
826 adap->msix_info[msi_index].desc,
b8ff05a9
DM
827 &s->ofldrxq[ofldqidx].rspq);
828 if (err)
829 goto unwind;
404d9e3f 830 msi_index++;
b8ff05a9
DM
831 }
832 for_each_rdmarxq(s, rdmaqidx) {
404d9e3f
VP
833 err = request_irq(adap->msix_info[msi_index].vec,
834 t4_sge_intr_msix, 0,
835 adap->msix_info[msi_index].desc,
b8ff05a9
DM
836 &s->rdmarxq[rdmaqidx].rspq);
837 if (err)
838 goto unwind;
404d9e3f 839 msi_index++;
b8ff05a9
DM
840 }
841 return 0;
842
843unwind:
844 while (--rdmaqidx >= 0)
404d9e3f 845 free_irq(adap->msix_info[--msi_index].vec,
b8ff05a9
DM
846 &s->rdmarxq[rdmaqidx].rspq);
847 while (--ofldqidx >= 0)
404d9e3f 848 free_irq(adap->msix_info[--msi_index].vec,
b8ff05a9
DM
849 &s->ofldrxq[ofldqidx].rspq);
850 while (--ethqidx >= 0)
404d9e3f
VP
851 free_irq(adap->msix_info[--msi_index].vec,
852 &s->ethrxq[ethqidx].rspq);
b8ff05a9
DM
853 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
854 return err;
855}
856
857static void free_msix_queue_irqs(struct adapter *adap)
858{
404d9e3f 859 int i, msi_index = 2;
b8ff05a9
DM
860 struct sge *s = &adap->sge;
861
862 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
863 for_each_ethrxq(s, i)
404d9e3f 864 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
b8ff05a9 865 for_each_ofldrxq(s, i)
404d9e3f 866 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
b8ff05a9 867 for_each_rdmarxq(s, i)
404d9e3f 868 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
b8ff05a9
DM
869}
870
671b0060
DM
871/**
872 * write_rss - write the RSS table for a given port
873 * @pi: the port
874 * @queues: array of queue indices for RSS
875 *
876 * Sets up the portion of the HW RSS table for the port's VI to distribute
877 * packets to the Rx queues in @queues.
878 */
879static int write_rss(const struct port_info *pi, const u16 *queues)
880{
881 u16 *rss;
882 int i, err;
883 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
884
885 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
886 if (!rss)
887 return -ENOMEM;
888
889 /* map the queue indices to queue ids */
890 for (i = 0; i < pi->rss_size; i++, queues++)
891 rss[i] = q[*queues].rspq.abs_id;
892
060e0c75
DM
893 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
894 pi->rss_size, rss, pi->rss_size);
671b0060
DM
895 kfree(rss);
896 return err;
897}
898
b8ff05a9
DM
899/**
900 * setup_rss - configure RSS
901 * @adap: the adapter
902 *
671b0060 903 * Sets up RSS for each port.
b8ff05a9
DM
904 */
905static int setup_rss(struct adapter *adap)
906{
671b0060 907 int i, err;
b8ff05a9
DM
908
909 for_each_port(adap, i) {
910 const struct port_info *pi = adap2pinfo(adap, i);
b8ff05a9 911
671b0060 912 err = write_rss(pi, pi->rss);
b8ff05a9
DM
913 if (err)
914 return err;
915 }
916 return 0;
917}
918
e46dab4d
DM
919/*
920 * Return the channel of the ingress queue with the given qid.
921 */
922static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
923{
924 qid -= p->ingr_start;
925 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
926}
927
b8ff05a9
DM
928/*
929 * Wait until all NAPI handlers are descheduled.
930 */
931static void quiesce_rx(struct adapter *adap)
932{
933 int i;
934
935 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
936 struct sge_rspq *q = adap->sge.ingr_map[i];
937
938 if (q && q->handler)
939 napi_disable(&q->napi);
940 }
941}
942
943/*
944 * Enable NAPI scheduling and interrupt generation for all Rx queues.
945 */
946static void enable_rx(struct adapter *adap)
947{
948 int i;
949
950 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
951 struct sge_rspq *q = adap->sge.ingr_map[i];
952
953 if (!q)
954 continue;
955 if (q->handler)
956 napi_enable(&q->napi);
957 /* 0-increment GTS to start the timer and enable interrupts */
958 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
959 SEINTARM(q->intr_params) |
960 INGRESSQID(q->cntxt_id));
961 }
962}
963
964/**
965 * setup_sge_queues - configure SGE Tx/Rx/response queues
966 * @adap: the adapter
967 *
968 * Determines how many sets of SGE queues to use and initializes them.
969 * We support multiple queue sets per port if we have MSI-X, otherwise
970 * just one queue set per port.
971 */
972static int setup_sge_queues(struct adapter *adap)
973{
974 int err, msi_idx, i, j;
975 struct sge *s = &adap->sge;
976
977 bitmap_zero(s->starving_fl, MAX_EGRQ);
978 bitmap_zero(s->txq_maperr, MAX_EGRQ);
979
980 if (adap->flags & USING_MSIX)
981 msi_idx = 1; /* vector 0 is for non-queue interrupts */
982 else {
983 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
984 NULL, NULL);
985 if (err)
986 return err;
987 msi_idx = -((int)s->intrq.abs_id + 1);
988 }
989
990 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
991 msi_idx, NULL, fwevtq_handler);
992 if (err) {
993freeout: t4_free_sge_resources(adap);
994 return err;
995 }
996
997 for_each_port(adap, i) {
998 struct net_device *dev = adap->port[i];
999 struct port_info *pi = netdev_priv(dev);
1000 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1001 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1002
1003 for (j = 0; j < pi->nqsets; j++, q++) {
1004 if (msi_idx > 0)
1005 msi_idx++;
1006 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1007 msi_idx, &q->fl,
1008 t4_ethrx_handler);
1009 if (err)
1010 goto freeout;
1011 q->rspq.idx = j;
1012 memset(&q->stats, 0, sizeof(q->stats));
1013 }
1014 for (j = 0; j < pi->nqsets; j++, t++) {
1015 err = t4_sge_alloc_eth_txq(adap, t, dev,
1016 netdev_get_tx_queue(dev, j),
1017 s->fw_evtq.cntxt_id);
1018 if (err)
1019 goto freeout;
1020 }
1021 }
1022
1023 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1024 for_each_ofldrxq(s, i) {
1025 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1026 struct net_device *dev = adap->port[i / j];
1027
1028 if (msi_idx > 0)
1029 msi_idx++;
1030 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1031 &q->fl, uldrx_handler);
1032 if (err)
1033 goto freeout;
1034 memset(&q->stats, 0, sizeof(q->stats));
1035 s->ofld_rxq[i] = q->rspq.abs_id;
1036 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1037 s->fw_evtq.cntxt_id);
1038 if (err)
1039 goto freeout;
1040 }
1041
1042 for_each_rdmarxq(s, i) {
1043 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1044
1045 if (msi_idx > 0)
1046 msi_idx++;
1047 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1048 msi_idx, &q->fl, uldrx_handler);
1049 if (err)
1050 goto freeout;
1051 memset(&q->stats, 0, sizeof(q->stats));
1052 s->rdma_rxq[i] = q->rspq.abs_id;
1053 }
1054
1055 for_each_port(adap, i) {
1056 /*
1057 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1058 * have RDMA queues, and that's the right value.
1059 */
1060 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1061 s->fw_evtq.cntxt_id,
1062 s->rdmarxq[i].rspq.cntxt_id);
1063 if (err)
1064 goto freeout;
1065 }
1066
1067 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
1068 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1069 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1070 return 0;
1071}
1072
b8ff05a9
DM
1073/*
1074 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1075 * The allocated memory is cleared.
1076 */
1077void *t4_alloc_mem(size_t size)
1078{
8be04b93 1079 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
b8ff05a9
DM
1080
1081 if (!p)
89bf67f1 1082 p = vzalloc(size);
b8ff05a9
DM
1083 return p;
1084}
1085
1086/*
1087 * Free memory allocated through alloc_mem().
1088 */
31b9c19b 1089static void t4_free_mem(void *addr)
b8ff05a9
DM
1090{
1091 if (is_vmalloc_addr(addr))
1092 vfree(addr);
1093 else
1094 kfree(addr);
1095}
1096
f2b7e78d
VP
1097/* Send a Work Request to write the filter at a specified index. We construct
1098 * a Firmware Filter Work Request to have the work done and put the indicated
1099 * filter into "pending" mode which will prevent any further actions against
1100 * it till we get a reply from the firmware on the completion status of the
1101 * request.
1102 */
1103static int set_filter_wr(struct adapter *adapter, int fidx)
1104{
1105 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1106 struct sk_buff *skb;
1107 struct fw_filter_wr *fwr;
1108 unsigned int ftid;
1109
1110 /* If the new filter requires loopback Destination MAC and/or VLAN
1111 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1112 * the filter.
1113 */
1114 if (f->fs.newdmac || f->fs.newvlan) {
1115 /* allocate L2T entry for new filter */
1116 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1117 if (f->l2t == NULL)
1118 return -EAGAIN;
1119 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1120 f->fs.eport, f->fs.dmac)) {
1121 cxgb4_l2t_release(f->l2t);
1122 f->l2t = NULL;
1123 return -ENOMEM;
1124 }
1125 }
1126
1127 ftid = adapter->tids.ftid_base + fidx;
1128
1129 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1130 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1131 memset(fwr, 0, sizeof(*fwr));
1132
1133 /* It would be nice to put most of the following in t4_hw.c but most
1134 * of the work is translating the cxgbtool ch_filter_specification
1135 * into the Work Request and the definition of that structure is
1136 * currently in cxgbtool.h which isn't appropriate to pull into the
1137 * common code. We may eventually try to come up with a more neutral
1138 * filter specification structure but for now it's easiest to simply
1139 * put this fairly direct code in line ...
1140 */
1141 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1142 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1143 fwr->tid_to_iq =
1144 htonl(V_FW_FILTER_WR_TID(ftid) |
1145 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1146 V_FW_FILTER_WR_NOREPLY(0) |
1147 V_FW_FILTER_WR_IQ(f->fs.iq));
1148 fwr->del_filter_to_l2tix =
1149 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1150 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1151 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1152 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1153 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1154 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1155 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1156 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1157 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1158 f->fs.newvlan == VLAN_REWRITE) |
1159 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1160 f->fs.newvlan == VLAN_REWRITE) |
1161 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1162 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1163 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1164 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1165 fwr->ethtype = htons(f->fs.val.ethtype);
1166 fwr->ethtypem = htons(f->fs.mask.ethtype);
1167 fwr->frag_to_ovlan_vldm =
1168 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1169 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1170 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1171 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1172 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1173 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1174 fwr->smac_sel = 0;
1175 fwr->rx_chan_rx_rpl_iq =
1176 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1177 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1178 fwr->maci_to_matchtypem =
1179 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1180 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1181 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1182 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1183 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1184 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1185 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1186 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1187 fwr->ptcl = f->fs.val.proto;
1188 fwr->ptclm = f->fs.mask.proto;
1189 fwr->ttyp = f->fs.val.tos;
1190 fwr->ttypm = f->fs.mask.tos;
1191 fwr->ivlan = htons(f->fs.val.ivlan);
1192 fwr->ivlanm = htons(f->fs.mask.ivlan);
1193 fwr->ovlan = htons(f->fs.val.ovlan);
1194 fwr->ovlanm = htons(f->fs.mask.ovlan);
1195 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1196 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1197 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1198 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1199 fwr->lp = htons(f->fs.val.lport);
1200 fwr->lpm = htons(f->fs.mask.lport);
1201 fwr->fp = htons(f->fs.val.fport);
1202 fwr->fpm = htons(f->fs.mask.fport);
1203 if (f->fs.newsmac)
1204 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1205
1206 /* Mark the filter as "pending" and ship off the Filter Work Request.
1207 * When we get the Work Request Reply we'll clear the pending status.
1208 */
1209 f->pending = 1;
1210 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1211 t4_ofld_send(adapter, skb);
1212 return 0;
1213}
1214
1215/* Delete the filter at a specified index.
1216 */
1217static int del_filter_wr(struct adapter *adapter, int fidx)
1218{
1219 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1220 struct sk_buff *skb;
1221 struct fw_filter_wr *fwr;
1222 unsigned int len, ftid;
1223
1224 len = sizeof(*fwr);
1225 ftid = adapter->tids.ftid_base + fidx;
1226
1227 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1228 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1229 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1230
1231 /* Mark the filter as "pending" and ship off the Filter Work Request.
1232 * When we get the Work Request Reply we'll clear the pending status.
1233 */
1234 f->pending = 1;
1235 t4_mgmt_tx(adapter, skb);
1236 return 0;
1237}
1238
b8ff05a9
DM
1239static inline int is_offload(const struct adapter *adap)
1240{
1241 return adap->params.offload;
1242}
1243
1244/*
1245 * Implementation of ethtool operations.
1246 */
1247
1248static u32 get_msglevel(struct net_device *dev)
1249{
1250 return netdev2adap(dev)->msg_enable;
1251}
1252
1253static void set_msglevel(struct net_device *dev, u32 val)
1254{
1255 netdev2adap(dev)->msg_enable = val;
1256}
1257
1258static char stats_strings[][ETH_GSTRING_LEN] = {
1259 "TxOctetsOK ",
1260 "TxFramesOK ",
1261 "TxBroadcastFrames ",
1262 "TxMulticastFrames ",
1263 "TxUnicastFrames ",
1264 "TxErrorFrames ",
1265
1266 "TxFrames64 ",
1267 "TxFrames65To127 ",
1268 "TxFrames128To255 ",
1269 "TxFrames256To511 ",
1270 "TxFrames512To1023 ",
1271 "TxFrames1024To1518 ",
1272 "TxFrames1519ToMax ",
1273
1274 "TxFramesDropped ",
1275 "TxPauseFrames ",
1276 "TxPPP0Frames ",
1277 "TxPPP1Frames ",
1278 "TxPPP2Frames ",
1279 "TxPPP3Frames ",
1280 "TxPPP4Frames ",
1281 "TxPPP5Frames ",
1282 "TxPPP6Frames ",
1283 "TxPPP7Frames ",
1284
1285 "RxOctetsOK ",
1286 "RxFramesOK ",
1287 "RxBroadcastFrames ",
1288 "RxMulticastFrames ",
1289 "RxUnicastFrames ",
1290
1291 "RxFramesTooLong ",
1292 "RxJabberErrors ",
1293 "RxFCSErrors ",
1294 "RxLengthErrors ",
1295 "RxSymbolErrors ",
1296 "RxRuntFrames ",
1297
1298 "RxFrames64 ",
1299 "RxFrames65To127 ",
1300 "RxFrames128To255 ",
1301 "RxFrames256To511 ",
1302 "RxFrames512To1023 ",
1303 "RxFrames1024To1518 ",
1304 "RxFrames1519ToMax ",
1305
1306 "RxPauseFrames ",
1307 "RxPPP0Frames ",
1308 "RxPPP1Frames ",
1309 "RxPPP2Frames ",
1310 "RxPPP3Frames ",
1311 "RxPPP4Frames ",
1312 "RxPPP5Frames ",
1313 "RxPPP6Frames ",
1314 "RxPPP7Frames ",
1315
1316 "RxBG0FramesDropped ",
1317 "RxBG1FramesDropped ",
1318 "RxBG2FramesDropped ",
1319 "RxBG3FramesDropped ",
1320 "RxBG0FramesTrunc ",
1321 "RxBG1FramesTrunc ",
1322 "RxBG2FramesTrunc ",
1323 "RxBG3FramesTrunc ",
1324
1325 "TSO ",
1326 "TxCsumOffload ",
1327 "RxCsumGood ",
1328 "VLANextractions ",
1329 "VLANinsertions ",
4a6346d4
DM
1330 "GROpackets ",
1331 "GROmerged ",
22adfe0a
SR
1332 "WriteCoalSuccess ",
1333 "WriteCoalFail ",
b8ff05a9
DM
1334};
1335
1336static int get_sset_count(struct net_device *dev, int sset)
1337{
1338 switch (sset) {
1339 case ETH_SS_STATS:
1340 return ARRAY_SIZE(stats_strings);
1341 default:
1342 return -EOPNOTSUPP;
1343 }
1344}
1345
1346#define T4_REGMAP_SIZE (160 * 1024)
251f9e88 1347#define T5_REGMAP_SIZE (332 * 1024)
b8ff05a9
DM
1348
1349static int get_regs_len(struct net_device *dev)
1350{
251f9e88 1351 struct adapter *adap = netdev2adap(dev);
d14807dd 1352 if (is_t4(adap->params.chip))
251f9e88
SR
1353 return T4_REGMAP_SIZE;
1354 else
1355 return T5_REGMAP_SIZE;
b8ff05a9
DM
1356}
1357
1358static int get_eeprom_len(struct net_device *dev)
1359{
1360 return EEPROMSIZE;
1361}
1362
1363static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1364{
1365 struct adapter *adapter = netdev2adap(dev);
1366
23020ab3
RJ
1367 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1368 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1369 strlcpy(info->bus_info, pci_name(adapter->pdev),
1370 sizeof(info->bus_info));
b8ff05a9 1371
84b40501 1372 if (adapter->params.fw_vers)
b8ff05a9
DM
1373 snprintf(info->fw_version, sizeof(info->fw_version),
1374 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1375 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1376 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1377 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1378 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1379 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1380 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1381 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1382 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1383}
1384
1385static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1386{
1387 if (stringset == ETH_SS_STATS)
1388 memcpy(data, stats_strings, sizeof(stats_strings));
1389}
1390
1391/*
1392 * port stats maintained per queue of the port. They should be in the same
1393 * order as in stats_strings above.
1394 */
1395struct queue_port_stats {
1396 u64 tso;
1397 u64 tx_csum;
1398 u64 rx_csum;
1399 u64 vlan_ex;
1400 u64 vlan_ins;
4a6346d4
DM
1401 u64 gro_pkts;
1402 u64 gro_merged;
b8ff05a9
DM
1403};
1404
1405static void collect_sge_port_stats(const struct adapter *adap,
1406 const struct port_info *p, struct queue_port_stats *s)
1407{
1408 int i;
1409 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1410 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1411
1412 memset(s, 0, sizeof(*s));
1413 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1414 s->tso += tx->tso;
1415 s->tx_csum += tx->tx_cso;
1416 s->rx_csum += rx->stats.rx_cso;
1417 s->vlan_ex += rx->stats.vlan_ex;
1418 s->vlan_ins += tx->vlan_ins;
4a6346d4
DM
1419 s->gro_pkts += rx->stats.lro_pkts;
1420 s->gro_merged += rx->stats.lro_merged;
b8ff05a9
DM
1421 }
1422}
1423
1424static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1425 u64 *data)
1426{
1427 struct port_info *pi = netdev_priv(dev);
1428 struct adapter *adapter = pi->adapter;
22adfe0a 1429 u32 val1, val2;
b8ff05a9
DM
1430
1431 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1432
1433 data += sizeof(struct port_stats) / sizeof(u64);
1434 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
22adfe0a 1435 data += sizeof(struct queue_port_stats) / sizeof(u64);
d14807dd 1436 if (!is_t4(adapter->params.chip)) {
22adfe0a
SR
1437 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1438 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1439 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1440 *data = val1 - val2;
1441 data++;
1442 *data = val2;
1443 data++;
1444 } else {
1445 memset(data, 0, 2 * sizeof(u64));
1446 *data += 2;
1447 }
b8ff05a9
DM
1448}
1449
1450/*
1451 * Return a version number to identify the type of adapter. The scheme is:
1452 * - bits 0..9: chip version
1453 * - bits 10..15: chip revision
835bb606 1454 * - bits 16..23: register dump version
b8ff05a9
DM
1455 */
1456static inline unsigned int mk_adap_vers(const struct adapter *ap)
1457{
d14807dd
HS
1458 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1459 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
b8ff05a9
DM
1460}
1461
1462static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1463 unsigned int end)
1464{
1465 u32 *p = buf + start;
1466
1467 for ( ; start <= end; start += sizeof(u32))
1468 *p++ = t4_read_reg(ap, start);
1469}
1470
1471static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1472 void *buf)
1473{
251f9e88 1474 static const unsigned int t4_reg_ranges[] = {
b8ff05a9
DM
1475 0x1008, 0x1108,
1476 0x1180, 0x11b4,
1477 0x11fc, 0x123c,
1478 0x1300, 0x173c,
1479 0x1800, 0x18fc,
1480 0x3000, 0x30d8,
1481 0x30e0, 0x5924,
1482 0x5960, 0x59d4,
1483 0x5a00, 0x5af8,
1484 0x6000, 0x6098,
1485 0x6100, 0x6150,
1486 0x6200, 0x6208,
1487 0x6240, 0x6248,
1488 0x6280, 0x6338,
1489 0x6370, 0x638c,
1490 0x6400, 0x643c,
1491 0x6500, 0x6524,
1492 0x6a00, 0x6a38,
1493 0x6a60, 0x6a78,
1494 0x6b00, 0x6b84,
1495 0x6bf0, 0x6c84,
1496 0x6cf0, 0x6d84,
1497 0x6df0, 0x6e84,
1498 0x6ef0, 0x6f84,
1499 0x6ff0, 0x7084,
1500 0x70f0, 0x7184,
1501 0x71f0, 0x7284,
1502 0x72f0, 0x7384,
1503 0x73f0, 0x7450,
1504 0x7500, 0x7530,
1505 0x7600, 0x761c,
1506 0x7680, 0x76cc,
1507 0x7700, 0x7798,
1508 0x77c0, 0x77fc,
1509 0x7900, 0x79fc,
1510 0x7b00, 0x7c38,
1511 0x7d00, 0x7efc,
1512 0x8dc0, 0x8e1c,
1513 0x8e30, 0x8e78,
1514 0x8ea0, 0x8f6c,
1515 0x8fc0, 0x9074,
1516 0x90fc, 0x90fc,
1517 0x9400, 0x9458,
1518 0x9600, 0x96bc,
1519 0x9800, 0x9808,
1520 0x9820, 0x983c,
1521 0x9850, 0x9864,
1522 0x9c00, 0x9c6c,
1523 0x9c80, 0x9cec,
1524 0x9d00, 0x9d6c,
1525 0x9d80, 0x9dec,
1526 0x9e00, 0x9e6c,
1527 0x9e80, 0x9eec,
1528 0x9f00, 0x9f6c,
1529 0x9f80, 0x9fec,
1530 0xd004, 0xd03c,
1531 0xdfc0, 0xdfe0,
1532 0xe000, 0xea7c,
1533 0xf000, 0x11190,
835bb606
DM
1534 0x19040, 0x1906c,
1535 0x19078, 0x19080,
1536 0x1908c, 0x19124,
b8ff05a9
DM
1537 0x19150, 0x191b0,
1538 0x191d0, 0x191e8,
1539 0x19238, 0x1924c,
1540 0x193f8, 0x19474,
1541 0x19490, 0x194f8,
1542 0x19800, 0x19f30,
1543 0x1a000, 0x1a06c,
1544 0x1a0b0, 0x1a120,
1545 0x1a128, 0x1a138,
1546 0x1a190, 0x1a1c4,
1547 0x1a1fc, 0x1a1fc,
1548 0x1e040, 0x1e04c,
835bb606 1549 0x1e284, 0x1e28c,
b8ff05a9
DM
1550 0x1e2c0, 0x1e2c0,
1551 0x1e2e0, 0x1e2e0,
1552 0x1e300, 0x1e384,
1553 0x1e3c0, 0x1e3c8,
1554 0x1e440, 0x1e44c,
835bb606 1555 0x1e684, 0x1e68c,
b8ff05a9
DM
1556 0x1e6c0, 0x1e6c0,
1557 0x1e6e0, 0x1e6e0,
1558 0x1e700, 0x1e784,
1559 0x1e7c0, 0x1e7c8,
1560 0x1e840, 0x1e84c,
835bb606 1561 0x1ea84, 0x1ea8c,
b8ff05a9
DM
1562 0x1eac0, 0x1eac0,
1563 0x1eae0, 0x1eae0,
1564 0x1eb00, 0x1eb84,
1565 0x1ebc0, 0x1ebc8,
1566 0x1ec40, 0x1ec4c,
835bb606 1567 0x1ee84, 0x1ee8c,
b8ff05a9
DM
1568 0x1eec0, 0x1eec0,
1569 0x1eee0, 0x1eee0,
1570 0x1ef00, 0x1ef84,
1571 0x1efc0, 0x1efc8,
1572 0x1f040, 0x1f04c,
835bb606 1573 0x1f284, 0x1f28c,
b8ff05a9
DM
1574 0x1f2c0, 0x1f2c0,
1575 0x1f2e0, 0x1f2e0,
1576 0x1f300, 0x1f384,
1577 0x1f3c0, 0x1f3c8,
1578 0x1f440, 0x1f44c,
835bb606 1579 0x1f684, 0x1f68c,
b8ff05a9
DM
1580 0x1f6c0, 0x1f6c0,
1581 0x1f6e0, 0x1f6e0,
1582 0x1f700, 0x1f784,
1583 0x1f7c0, 0x1f7c8,
1584 0x1f840, 0x1f84c,
835bb606 1585 0x1fa84, 0x1fa8c,
b8ff05a9
DM
1586 0x1fac0, 0x1fac0,
1587 0x1fae0, 0x1fae0,
1588 0x1fb00, 0x1fb84,
1589 0x1fbc0, 0x1fbc8,
1590 0x1fc40, 0x1fc4c,
835bb606 1591 0x1fe84, 0x1fe8c,
b8ff05a9
DM
1592 0x1fec0, 0x1fec0,
1593 0x1fee0, 0x1fee0,
1594 0x1ff00, 0x1ff84,
1595 0x1ffc0, 0x1ffc8,
1596 0x20000, 0x2002c,
1597 0x20100, 0x2013c,
1598 0x20190, 0x201c8,
1599 0x20200, 0x20318,
1600 0x20400, 0x20528,
1601 0x20540, 0x20614,
1602 0x21000, 0x21040,
1603 0x2104c, 0x21060,
1604 0x210c0, 0x210ec,
1605 0x21200, 0x21268,
1606 0x21270, 0x21284,
1607 0x212fc, 0x21388,
1608 0x21400, 0x21404,
1609 0x21500, 0x21518,
1610 0x2152c, 0x2153c,
1611 0x21550, 0x21554,
1612 0x21600, 0x21600,
1613 0x21608, 0x21628,
1614 0x21630, 0x2163c,
1615 0x21700, 0x2171c,
1616 0x21780, 0x2178c,
1617 0x21800, 0x21c38,
1618 0x21c80, 0x21d7c,
1619 0x21e00, 0x21e04,
1620 0x22000, 0x2202c,
1621 0x22100, 0x2213c,
1622 0x22190, 0x221c8,
1623 0x22200, 0x22318,
1624 0x22400, 0x22528,
1625 0x22540, 0x22614,
1626 0x23000, 0x23040,
1627 0x2304c, 0x23060,
1628 0x230c0, 0x230ec,
1629 0x23200, 0x23268,
1630 0x23270, 0x23284,
1631 0x232fc, 0x23388,
1632 0x23400, 0x23404,
1633 0x23500, 0x23518,
1634 0x2352c, 0x2353c,
1635 0x23550, 0x23554,
1636 0x23600, 0x23600,
1637 0x23608, 0x23628,
1638 0x23630, 0x2363c,
1639 0x23700, 0x2371c,
1640 0x23780, 0x2378c,
1641 0x23800, 0x23c38,
1642 0x23c80, 0x23d7c,
1643 0x23e00, 0x23e04,
1644 0x24000, 0x2402c,
1645 0x24100, 0x2413c,
1646 0x24190, 0x241c8,
1647 0x24200, 0x24318,
1648 0x24400, 0x24528,
1649 0x24540, 0x24614,
1650 0x25000, 0x25040,
1651 0x2504c, 0x25060,
1652 0x250c0, 0x250ec,
1653 0x25200, 0x25268,
1654 0x25270, 0x25284,
1655 0x252fc, 0x25388,
1656 0x25400, 0x25404,
1657 0x25500, 0x25518,
1658 0x2552c, 0x2553c,
1659 0x25550, 0x25554,
1660 0x25600, 0x25600,
1661 0x25608, 0x25628,
1662 0x25630, 0x2563c,
1663 0x25700, 0x2571c,
1664 0x25780, 0x2578c,
1665 0x25800, 0x25c38,
1666 0x25c80, 0x25d7c,
1667 0x25e00, 0x25e04,
1668 0x26000, 0x2602c,
1669 0x26100, 0x2613c,
1670 0x26190, 0x261c8,
1671 0x26200, 0x26318,
1672 0x26400, 0x26528,
1673 0x26540, 0x26614,
1674 0x27000, 0x27040,
1675 0x2704c, 0x27060,
1676 0x270c0, 0x270ec,
1677 0x27200, 0x27268,
1678 0x27270, 0x27284,
1679 0x272fc, 0x27388,
1680 0x27400, 0x27404,
1681 0x27500, 0x27518,
1682 0x2752c, 0x2753c,
1683 0x27550, 0x27554,
1684 0x27600, 0x27600,
1685 0x27608, 0x27628,
1686 0x27630, 0x2763c,
1687 0x27700, 0x2771c,
1688 0x27780, 0x2778c,
1689 0x27800, 0x27c38,
1690 0x27c80, 0x27d7c,
1691 0x27e00, 0x27e04
1692 };
1693
251f9e88
SR
1694 static const unsigned int t5_reg_ranges[] = {
1695 0x1008, 0x1148,
1696 0x1180, 0x11b4,
1697 0x11fc, 0x123c,
1698 0x1280, 0x173c,
1699 0x1800, 0x18fc,
1700 0x3000, 0x3028,
1701 0x3060, 0x30d8,
1702 0x30e0, 0x30fc,
1703 0x3140, 0x357c,
1704 0x35a8, 0x35cc,
1705 0x35ec, 0x35ec,
1706 0x3600, 0x5624,
1707 0x56cc, 0x575c,
1708 0x580c, 0x5814,
1709 0x5890, 0x58bc,
1710 0x5940, 0x59dc,
1711 0x59fc, 0x5a18,
1712 0x5a60, 0x5a9c,
1713 0x5b9c, 0x5bfc,
1714 0x6000, 0x6040,
1715 0x6058, 0x614c,
1716 0x7700, 0x7798,
1717 0x77c0, 0x78fc,
1718 0x7b00, 0x7c54,
1719 0x7d00, 0x7efc,
1720 0x8dc0, 0x8de0,
1721 0x8df8, 0x8e84,
1722 0x8ea0, 0x8f84,
1723 0x8fc0, 0x90f8,
1724 0x9400, 0x9470,
1725 0x9600, 0x96f4,
1726 0x9800, 0x9808,
1727 0x9820, 0x983c,
1728 0x9850, 0x9864,
1729 0x9c00, 0x9c6c,
1730 0x9c80, 0x9cec,
1731 0x9d00, 0x9d6c,
1732 0x9d80, 0x9dec,
1733 0x9e00, 0x9e6c,
1734 0x9e80, 0x9eec,
1735 0x9f00, 0x9f6c,
1736 0x9f80, 0xa020,
1737 0xd004, 0xd03c,
1738 0xdfc0, 0xdfe0,
1739 0xe000, 0x11088,
1740 0x1109c, 0x1117c,
1741 0x11190, 0x11204,
1742 0x19040, 0x1906c,
1743 0x19078, 0x19080,
1744 0x1908c, 0x19124,
1745 0x19150, 0x191b0,
1746 0x191d0, 0x191e8,
1747 0x19238, 0x19290,
1748 0x193f8, 0x19474,
1749 0x19490, 0x194cc,
1750 0x194f0, 0x194f8,
1751 0x19c00, 0x19c60,
1752 0x19c94, 0x19e10,
1753 0x19e50, 0x19f34,
1754 0x19f40, 0x19f50,
1755 0x19f90, 0x19fe4,
1756 0x1a000, 0x1a06c,
1757 0x1a0b0, 0x1a120,
1758 0x1a128, 0x1a138,
1759 0x1a190, 0x1a1c4,
1760 0x1a1fc, 0x1a1fc,
1761 0x1e008, 0x1e00c,
1762 0x1e040, 0x1e04c,
1763 0x1e284, 0x1e290,
1764 0x1e2c0, 0x1e2c0,
1765 0x1e2e0, 0x1e2e0,
1766 0x1e300, 0x1e384,
1767 0x1e3c0, 0x1e3c8,
1768 0x1e408, 0x1e40c,
1769 0x1e440, 0x1e44c,
1770 0x1e684, 0x1e690,
1771 0x1e6c0, 0x1e6c0,
1772 0x1e6e0, 0x1e6e0,
1773 0x1e700, 0x1e784,
1774 0x1e7c0, 0x1e7c8,
1775 0x1e808, 0x1e80c,
1776 0x1e840, 0x1e84c,
1777 0x1ea84, 0x1ea90,
1778 0x1eac0, 0x1eac0,
1779 0x1eae0, 0x1eae0,
1780 0x1eb00, 0x1eb84,
1781 0x1ebc0, 0x1ebc8,
1782 0x1ec08, 0x1ec0c,
1783 0x1ec40, 0x1ec4c,
1784 0x1ee84, 0x1ee90,
1785 0x1eec0, 0x1eec0,
1786 0x1eee0, 0x1eee0,
1787 0x1ef00, 0x1ef84,
1788 0x1efc0, 0x1efc8,
1789 0x1f008, 0x1f00c,
1790 0x1f040, 0x1f04c,
1791 0x1f284, 0x1f290,
1792 0x1f2c0, 0x1f2c0,
1793 0x1f2e0, 0x1f2e0,
1794 0x1f300, 0x1f384,
1795 0x1f3c0, 0x1f3c8,
1796 0x1f408, 0x1f40c,
1797 0x1f440, 0x1f44c,
1798 0x1f684, 0x1f690,
1799 0x1f6c0, 0x1f6c0,
1800 0x1f6e0, 0x1f6e0,
1801 0x1f700, 0x1f784,
1802 0x1f7c0, 0x1f7c8,
1803 0x1f808, 0x1f80c,
1804 0x1f840, 0x1f84c,
1805 0x1fa84, 0x1fa90,
1806 0x1fac0, 0x1fac0,
1807 0x1fae0, 0x1fae0,
1808 0x1fb00, 0x1fb84,
1809 0x1fbc0, 0x1fbc8,
1810 0x1fc08, 0x1fc0c,
1811 0x1fc40, 0x1fc4c,
1812 0x1fe84, 0x1fe90,
1813 0x1fec0, 0x1fec0,
1814 0x1fee0, 0x1fee0,
1815 0x1ff00, 0x1ff84,
1816 0x1ffc0, 0x1ffc8,
1817 0x30000, 0x30030,
1818 0x30100, 0x30144,
1819 0x30190, 0x301d0,
1820 0x30200, 0x30318,
1821 0x30400, 0x3052c,
1822 0x30540, 0x3061c,
1823 0x30800, 0x30834,
1824 0x308c0, 0x30908,
1825 0x30910, 0x309ac,
1826 0x30a00, 0x30a04,
1827 0x30a0c, 0x30a2c,
1828 0x30a44, 0x30a50,
1829 0x30a74, 0x30c24,
1830 0x30d08, 0x30d14,
1831 0x30d1c, 0x30d20,
1832 0x30d3c, 0x30d50,
1833 0x31200, 0x3120c,
1834 0x31220, 0x31220,
1835 0x31240, 0x31240,
1836 0x31600, 0x31600,
1837 0x31608, 0x3160c,
1838 0x31a00, 0x31a1c,
1839 0x31e04, 0x31e20,
1840 0x31e38, 0x31e3c,
1841 0x31e80, 0x31e80,
1842 0x31e88, 0x31ea8,
1843 0x31eb0, 0x31eb4,
1844 0x31ec8, 0x31ed4,
1845 0x31fb8, 0x32004,
1846 0x32208, 0x3223c,
1847 0x32600, 0x32630,
1848 0x32a00, 0x32abc,
1849 0x32b00, 0x32b70,
1850 0x33000, 0x33048,
1851 0x33060, 0x3309c,
1852 0x330f0, 0x33148,
1853 0x33160, 0x3319c,
1854 0x331f0, 0x332e4,
1855 0x332f8, 0x333e4,
1856 0x333f8, 0x33448,
1857 0x33460, 0x3349c,
1858 0x334f0, 0x33548,
1859 0x33560, 0x3359c,
1860 0x335f0, 0x336e4,
1861 0x336f8, 0x337e4,
1862 0x337f8, 0x337fc,
1863 0x33814, 0x33814,
1864 0x3382c, 0x3382c,
1865 0x33880, 0x3388c,
1866 0x338e8, 0x338ec,
1867 0x33900, 0x33948,
1868 0x33960, 0x3399c,
1869 0x339f0, 0x33ae4,
1870 0x33af8, 0x33b10,
1871 0x33b28, 0x33b28,
1872 0x33b3c, 0x33b50,
1873 0x33bf0, 0x33c10,
1874 0x33c28, 0x33c28,
1875 0x33c3c, 0x33c50,
1876 0x33cf0, 0x33cfc,
1877 0x34000, 0x34030,
1878 0x34100, 0x34144,
1879 0x34190, 0x341d0,
1880 0x34200, 0x34318,
1881 0x34400, 0x3452c,
1882 0x34540, 0x3461c,
1883 0x34800, 0x34834,
1884 0x348c0, 0x34908,
1885 0x34910, 0x349ac,
1886 0x34a00, 0x34a04,
1887 0x34a0c, 0x34a2c,
1888 0x34a44, 0x34a50,
1889 0x34a74, 0x34c24,
1890 0x34d08, 0x34d14,
1891 0x34d1c, 0x34d20,
1892 0x34d3c, 0x34d50,
1893 0x35200, 0x3520c,
1894 0x35220, 0x35220,
1895 0x35240, 0x35240,
1896 0x35600, 0x35600,
1897 0x35608, 0x3560c,
1898 0x35a00, 0x35a1c,
1899 0x35e04, 0x35e20,
1900 0x35e38, 0x35e3c,
1901 0x35e80, 0x35e80,
1902 0x35e88, 0x35ea8,
1903 0x35eb0, 0x35eb4,
1904 0x35ec8, 0x35ed4,
1905 0x35fb8, 0x36004,
1906 0x36208, 0x3623c,
1907 0x36600, 0x36630,
1908 0x36a00, 0x36abc,
1909 0x36b00, 0x36b70,
1910 0x37000, 0x37048,
1911 0x37060, 0x3709c,
1912 0x370f0, 0x37148,
1913 0x37160, 0x3719c,
1914 0x371f0, 0x372e4,
1915 0x372f8, 0x373e4,
1916 0x373f8, 0x37448,
1917 0x37460, 0x3749c,
1918 0x374f0, 0x37548,
1919 0x37560, 0x3759c,
1920 0x375f0, 0x376e4,
1921 0x376f8, 0x377e4,
1922 0x377f8, 0x377fc,
1923 0x37814, 0x37814,
1924 0x3782c, 0x3782c,
1925 0x37880, 0x3788c,
1926 0x378e8, 0x378ec,
1927 0x37900, 0x37948,
1928 0x37960, 0x3799c,
1929 0x379f0, 0x37ae4,
1930 0x37af8, 0x37b10,
1931 0x37b28, 0x37b28,
1932 0x37b3c, 0x37b50,
1933 0x37bf0, 0x37c10,
1934 0x37c28, 0x37c28,
1935 0x37c3c, 0x37c50,
1936 0x37cf0, 0x37cfc,
1937 0x38000, 0x38030,
1938 0x38100, 0x38144,
1939 0x38190, 0x381d0,
1940 0x38200, 0x38318,
1941 0x38400, 0x3852c,
1942 0x38540, 0x3861c,
1943 0x38800, 0x38834,
1944 0x388c0, 0x38908,
1945 0x38910, 0x389ac,
1946 0x38a00, 0x38a04,
1947 0x38a0c, 0x38a2c,
1948 0x38a44, 0x38a50,
1949 0x38a74, 0x38c24,
1950 0x38d08, 0x38d14,
1951 0x38d1c, 0x38d20,
1952 0x38d3c, 0x38d50,
1953 0x39200, 0x3920c,
1954 0x39220, 0x39220,
1955 0x39240, 0x39240,
1956 0x39600, 0x39600,
1957 0x39608, 0x3960c,
1958 0x39a00, 0x39a1c,
1959 0x39e04, 0x39e20,
1960 0x39e38, 0x39e3c,
1961 0x39e80, 0x39e80,
1962 0x39e88, 0x39ea8,
1963 0x39eb0, 0x39eb4,
1964 0x39ec8, 0x39ed4,
1965 0x39fb8, 0x3a004,
1966 0x3a208, 0x3a23c,
1967 0x3a600, 0x3a630,
1968 0x3aa00, 0x3aabc,
1969 0x3ab00, 0x3ab70,
1970 0x3b000, 0x3b048,
1971 0x3b060, 0x3b09c,
1972 0x3b0f0, 0x3b148,
1973 0x3b160, 0x3b19c,
1974 0x3b1f0, 0x3b2e4,
1975 0x3b2f8, 0x3b3e4,
1976 0x3b3f8, 0x3b448,
1977 0x3b460, 0x3b49c,
1978 0x3b4f0, 0x3b548,
1979 0x3b560, 0x3b59c,
1980 0x3b5f0, 0x3b6e4,
1981 0x3b6f8, 0x3b7e4,
1982 0x3b7f8, 0x3b7fc,
1983 0x3b814, 0x3b814,
1984 0x3b82c, 0x3b82c,
1985 0x3b880, 0x3b88c,
1986 0x3b8e8, 0x3b8ec,
1987 0x3b900, 0x3b948,
1988 0x3b960, 0x3b99c,
1989 0x3b9f0, 0x3bae4,
1990 0x3baf8, 0x3bb10,
1991 0x3bb28, 0x3bb28,
1992 0x3bb3c, 0x3bb50,
1993 0x3bbf0, 0x3bc10,
1994 0x3bc28, 0x3bc28,
1995 0x3bc3c, 0x3bc50,
1996 0x3bcf0, 0x3bcfc,
1997 0x3c000, 0x3c030,
1998 0x3c100, 0x3c144,
1999 0x3c190, 0x3c1d0,
2000 0x3c200, 0x3c318,
2001 0x3c400, 0x3c52c,
2002 0x3c540, 0x3c61c,
2003 0x3c800, 0x3c834,
2004 0x3c8c0, 0x3c908,
2005 0x3c910, 0x3c9ac,
2006 0x3ca00, 0x3ca04,
2007 0x3ca0c, 0x3ca2c,
2008 0x3ca44, 0x3ca50,
2009 0x3ca74, 0x3cc24,
2010 0x3cd08, 0x3cd14,
2011 0x3cd1c, 0x3cd20,
2012 0x3cd3c, 0x3cd50,
2013 0x3d200, 0x3d20c,
2014 0x3d220, 0x3d220,
2015 0x3d240, 0x3d240,
2016 0x3d600, 0x3d600,
2017 0x3d608, 0x3d60c,
2018 0x3da00, 0x3da1c,
2019 0x3de04, 0x3de20,
2020 0x3de38, 0x3de3c,
2021 0x3de80, 0x3de80,
2022 0x3de88, 0x3dea8,
2023 0x3deb0, 0x3deb4,
2024 0x3dec8, 0x3ded4,
2025 0x3dfb8, 0x3e004,
2026 0x3e208, 0x3e23c,
2027 0x3e600, 0x3e630,
2028 0x3ea00, 0x3eabc,
2029 0x3eb00, 0x3eb70,
2030 0x3f000, 0x3f048,
2031 0x3f060, 0x3f09c,
2032 0x3f0f0, 0x3f148,
2033 0x3f160, 0x3f19c,
2034 0x3f1f0, 0x3f2e4,
2035 0x3f2f8, 0x3f3e4,
2036 0x3f3f8, 0x3f448,
2037 0x3f460, 0x3f49c,
2038 0x3f4f0, 0x3f548,
2039 0x3f560, 0x3f59c,
2040 0x3f5f0, 0x3f6e4,
2041 0x3f6f8, 0x3f7e4,
2042 0x3f7f8, 0x3f7fc,
2043 0x3f814, 0x3f814,
2044 0x3f82c, 0x3f82c,
2045 0x3f880, 0x3f88c,
2046 0x3f8e8, 0x3f8ec,
2047 0x3f900, 0x3f948,
2048 0x3f960, 0x3f99c,
2049 0x3f9f0, 0x3fae4,
2050 0x3faf8, 0x3fb10,
2051 0x3fb28, 0x3fb28,
2052 0x3fb3c, 0x3fb50,
2053 0x3fbf0, 0x3fc10,
2054 0x3fc28, 0x3fc28,
2055 0x3fc3c, 0x3fc50,
2056 0x3fcf0, 0x3fcfc,
2057 0x40000, 0x4000c,
2058 0x40040, 0x40068,
2059 0x40080, 0x40144,
2060 0x40180, 0x4018c,
2061 0x40200, 0x40298,
2062 0x402ac, 0x4033c,
2063 0x403f8, 0x403fc,
2064 0x41300, 0x413c4,
2065 0x41400, 0x4141c,
2066 0x41480, 0x414d0,
2067 0x44000, 0x44078,
2068 0x440c0, 0x44278,
2069 0x442c0, 0x44478,
2070 0x444c0, 0x44678,
2071 0x446c0, 0x44878,
2072 0x448c0, 0x449fc,
2073 0x45000, 0x45068,
2074 0x45080, 0x45084,
2075 0x450a0, 0x450b0,
2076 0x45200, 0x45268,
2077 0x45280, 0x45284,
2078 0x452a0, 0x452b0,
2079 0x460c0, 0x460e4,
2080 0x47000, 0x4708c,
2081 0x47200, 0x47250,
2082 0x47400, 0x47420,
2083 0x47600, 0x47618,
2084 0x47800, 0x47814,
2085 0x48000, 0x4800c,
2086 0x48040, 0x48068,
2087 0x48080, 0x48144,
2088 0x48180, 0x4818c,
2089 0x48200, 0x48298,
2090 0x482ac, 0x4833c,
2091 0x483f8, 0x483fc,
2092 0x49300, 0x493c4,
2093 0x49400, 0x4941c,
2094 0x49480, 0x494d0,
2095 0x4c000, 0x4c078,
2096 0x4c0c0, 0x4c278,
2097 0x4c2c0, 0x4c478,
2098 0x4c4c0, 0x4c678,
2099 0x4c6c0, 0x4c878,
2100 0x4c8c0, 0x4c9fc,
2101 0x4d000, 0x4d068,
2102 0x4d080, 0x4d084,
2103 0x4d0a0, 0x4d0b0,
2104 0x4d200, 0x4d268,
2105 0x4d280, 0x4d284,
2106 0x4d2a0, 0x4d2b0,
2107 0x4e0c0, 0x4e0e4,
2108 0x4f000, 0x4f08c,
2109 0x4f200, 0x4f250,
2110 0x4f400, 0x4f420,
2111 0x4f600, 0x4f618,
2112 0x4f800, 0x4f814,
2113 0x50000, 0x500cc,
2114 0x50400, 0x50400,
2115 0x50800, 0x508cc,
2116 0x50c00, 0x50c00,
2117 0x51000, 0x5101c,
2118 0x51300, 0x51308,
2119 };
2120
b8ff05a9
DM
2121 int i;
2122 struct adapter *ap = netdev2adap(dev);
251f9e88
SR
2123 static const unsigned int *reg_ranges;
2124 int arr_size = 0, buf_size = 0;
2125
d14807dd 2126 if (is_t4(ap->params.chip)) {
251f9e88
SR
2127 reg_ranges = &t4_reg_ranges[0];
2128 arr_size = ARRAY_SIZE(t4_reg_ranges);
2129 buf_size = T4_REGMAP_SIZE;
2130 } else {
2131 reg_ranges = &t5_reg_ranges[0];
2132 arr_size = ARRAY_SIZE(t5_reg_ranges);
2133 buf_size = T5_REGMAP_SIZE;
2134 }
b8ff05a9
DM
2135
2136 regs->version = mk_adap_vers(ap);
2137
251f9e88
SR
2138 memset(buf, 0, buf_size);
2139 for (i = 0; i < arr_size; i += 2)
b8ff05a9
DM
2140 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2141}
2142
2143static int restart_autoneg(struct net_device *dev)
2144{
2145 struct port_info *p = netdev_priv(dev);
2146
2147 if (!netif_running(dev))
2148 return -EAGAIN;
2149 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2150 return -EINVAL;
060e0c75 2151 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
b8ff05a9
DM
2152 return 0;
2153}
2154
c5e06360
DM
2155static int identify_port(struct net_device *dev,
2156 enum ethtool_phys_id_state state)
b8ff05a9 2157{
c5e06360 2158 unsigned int val;
060e0c75
DM
2159 struct adapter *adap = netdev2adap(dev);
2160
c5e06360
DM
2161 if (state == ETHTOOL_ID_ACTIVE)
2162 val = 0xffff;
2163 else if (state == ETHTOOL_ID_INACTIVE)
2164 val = 0;
2165 else
2166 return -EINVAL;
b8ff05a9 2167
c5e06360 2168 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
b8ff05a9
DM
2169}
2170
2171static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2172{
2173 unsigned int v = 0;
2174
a0881cab
DM
2175 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2176 type == FW_PORT_TYPE_BT_XAUI) {
b8ff05a9
DM
2177 v |= SUPPORTED_TP;
2178 if (caps & FW_PORT_CAP_SPEED_100M)
2179 v |= SUPPORTED_100baseT_Full;
2180 if (caps & FW_PORT_CAP_SPEED_1G)
2181 v |= SUPPORTED_1000baseT_Full;
2182 if (caps & FW_PORT_CAP_SPEED_10G)
2183 v |= SUPPORTED_10000baseT_Full;
2184 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2185 v |= SUPPORTED_Backplane;
2186 if (caps & FW_PORT_CAP_SPEED_1G)
2187 v |= SUPPORTED_1000baseKX_Full;
2188 if (caps & FW_PORT_CAP_SPEED_10G)
2189 v |= SUPPORTED_10000baseKX4_Full;
2190 } else if (type == FW_PORT_TYPE_KR)
2191 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
a0881cab 2192 else if (type == FW_PORT_TYPE_BP_AP)
7d5e77aa
DM
2193 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2194 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2195 else if (type == FW_PORT_TYPE_BP4_AP)
2196 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2197 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2198 SUPPORTED_10000baseKX4_Full;
a0881cab
DM
2199 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2200 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
b8ff05a9
DM
2201 v |= SUPPORTED_FIBRE;
2202
2203 if (caps & FW_PORT_CAP_ANEG)
2204 v |= SUPPORTED_Autoneg;
2205 return v;
2206}
2207
2208static unsigned int to_fw_linkcaps(unsigned int caps)
2209{
2210 unsigned int v = 0;
2211
2212 if (caps & ADVERTISED_100baseT_Full)
2213 v |= FW_PORT_CAP_SPEED_100M;
2214 if (caps & ADVERTISED_1000baseT_Full)
2215 v |= FW_PORT_CAP_SPEED_1G;
2216 if (caps & ADVERTISED_10000baseT_Full)
2217 v |= FW_PORT_CAP_SPEED_10G;
2218 return v;
2219}
2220
2221static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2222{
2223 const struct port_info *p = netdev_priv(dev);
2224
2225 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
a0881cab 2226 p->port_type == FW_PORT_TYPE_BT_XFI ||
b8ff05a9
DM
2227 p->port_type == FW_PORT_TYPE_BT_XAUI)
2228 cmd->port = PORT_TP;
a0881cab
DM
2229 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2230 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
b8ff05a9 2231 cmd->port = PORT_FIBRE;
a0881cab
DM
2232 else if (p->port_type == FW_PORT_TYPE_SFP) {
2233 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2234 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2235 cmd->port = PORT_DA;
2236 else
2237 cmd->port = PORT_FIBRE;
2238 } else
b8ff05a9
DM
2239 cmd->port = PORT_OTHER;
2240
2241 if (p->mdio_addr >= 0) {
2242 cmd->phy_address = p->mdio_addr;
2243 cmd->transceiver = XCVR_EXTERNAL;
2244 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2245 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2246 } else {
2247 cmd->phy_address = 0; /* not really, but no better option */
2248 cmd->transceiver = XCVR_INTERNAL;
2249 cmd->mdio_support = 0;
2250 }
2251
2252 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2253 cmd->advertising = from_fw_linkcaps(p->port_type,
2254 p->link_cfg.advertising);
70739497
DD
2255 ethtool_cmd_speed_set(cmd,
2256 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
b8ff05a9
DM
2257 cmd->duplex = DUPLEX_FULL;
2258 cmd->autoneg = p->link_cfg.autoneg;
2259 cmd->maxtxpkt = 0;
2260 cmd->maxrxpkt = 0;
2261 return 0;
2262}
2263
2264static unsigned int speed_to_caps(int speed)
2265{
2266 if (speed == SPEED_100)
2267 return FW_PORT_CAP_SPEED_100M;
2268 if (speed == SPEED_1000)
2269 return FW_PORT_CAP_SPEED_1G;
2270 if (speed == SPEED_10000)
2271 return FW_PORT_CAP_SPEED_10G;
2272 return 0;
2273}
2274
2275static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2276{
2277 unsigned int cap;
2278 struct port_info *p = netdev_priv(dev);
2279 struct link_config *lc = &p->link_cfg;
25db0338 2280 u32 speed = ethtool_cmd_speed(cmd);
b8ff05a9
DM
2281
2282 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2283 return -EINVAL;
2284
2285 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2286 /*
2287 * PHY offers a single speed. See if that's what's
2288 * being requested.
2289 */
2290 if (cmd->autoneg == AUTONEG_DISABLE &&
25db0338
DD
2291 (lc->supported & speed_to_caps(speed)))
2292 return 0;
b8ff05a9
DM
2293 return -EINVAL;
2294 }
2295
2296 if (cmd->autoneg == AUTONEG_DISABLE) {
25db0338 2297 cap = speed_to_caps(speed);
b8ff05a9 2298
25db0338
DD
2299 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2300 (speed == SPEED_10000))
b8ff05a9
DM
2301 return -EINVAL;
2302 lc->requested_speed = cap;
2303 lc->advertising = 0;
2304 } else {
2305 cap = to_fw_linkcaps(cmd->advertising);
2306 if (!(lc->supported & cap))
2307 return -EINVAL;
2308 lc->requested_speed = 0;
2309 lc->advertising = cap | FW_PORT_CAP_ANEG;
2310 }
2311 lc->autoneg = cmd->autoneg;
2312
2313 if (netif_running(dev))
060e0c75
DM
2314 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2315 lc);
b8ff05a9
DM
2316 return 0;
2317}
2318
2319static void get_pauseparam(struct net_device *dev,
2320 struct ethtool_pauseparam *epause)
2321{
2322 struct port_info *p = netdev_priv(dev);
2323
2324 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2325 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2326 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2327}
2328
2329static int set_pauseparam(struct net_device *dev,
2330 struct ethtool_pauseparam *epause)
2331{
2332 struct port_info *p = netdev_priv(dev);
2333 struct link_config *lc = &p->link_cfg;
2334
2335 if (epause->autoneg == AUTONEG_DISABLE)
2336 lc->requested_fc = 0;
2337 else if (lc->supported & FW_PORT_CAP_ANEG)
2338 lc->requested_fc = PAUSE_AUTONEG;
2339 else
2340 return -EINVAL;
2341
2342 if (epause->rx_pause)
2343 lc->requested_fc |= PAUSE_RX;
2344 if (epause->tx_pause)
2345 lc->requested_fc |= PAUSE_TX;
2346 if (netif_running(dev))
060e0c75
DM
2347 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2348 lc);
b8ff05a9
DM
2349 return 0;
2350}
2351
b8ff05a9
DM
2352static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2353{
2354 const struct port_info *pi = netdev_priv(dev);
2355 const struct sge *s = &pi->adapter->sge;
2356
2357 e->rx_max_pending = MAX_RX_BUFFERS;
2358 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2359 e->rx_jumbo_max_pending = 0;
2360 e->tx_max_pending = MAX_TXQ_ENTRIES;
2361
2362 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2363 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2364 e->rx_jumbo_pending = 0;
2365 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2366}
2367
2368static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2369{
2370 int i;
2371 const struct port_info *pi = netdev_priv(dev);
2372 struct adapter *adapter = pi->adapter;
2373 struct sge *s = &adapter->sge;
2374
2375 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2376 e->tx_pending > MAX_TXQ_ENTRIES ||
2377 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2378 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2379 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2380 return -EINVAL;
2381
2382 if (adapter->flags & FULL_INIT_DONE)
2383 return -EBUSY;
2384
2385 for (i = 0; i < pi->nqsets; ++i) {
2386 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2387 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2388 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2389 }
2390 return 0;
2391}
2392
2393static int closest_timer(const struct sge *s, int time)
2394{
2395 int i, delta, match = 0, min_delta = INT_MAX;
2396
2397 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2398 delta = time - s->timer_val[i];
2399 if (delta < 0)
2400 delta = -delta;
2401 if (delta < min_delta) {
2402 min_delta = delta;
2403 match = i;
2404 }
2405 }
2406 return match;
2407}
2408
2409static int closest_thres(const struct sge *s, int thres)
2410{
2411 int i, delta, match = 0, min_delta = INT_MAX;
2412
2413 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2414 delta = thres - s->counter_val[i];
2415 if (delta < 0)
2416 delta = -delta;
2417 if (delta < min_delta) {
2418 min_delta = delta;
2419 match = i;
2420 }
2421 }
2422 return match;
2423}
2424
2425/*
2426 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2427 */
2428static unsigned int qtimer_val(const struct adapter *adap,
2429 const struct sge_rspq *q)
2430{
2431 unsigned int idx = q->intr_params >> 1;
2432
2433 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2434}
2435
2436/**
2437 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
2438 * @adap: the adapter
2439 * @q: the Rx queue
2440 * @us: the hold-off time in us, or 0 to disable timer
2441 * @cnt: the hold-off packet count, or 0 to disable counter
2442 *
2443 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2444 * one of the two needs to be enabled for the queue to generate interrupts.
2445 */
2446static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2447 unsigned int us, unsigned int cnt)
2448{
2449 if ((us | cnt) == 0)
2450 cnt = 1;
2451
2452 if (cnt) {
2453 int err;
2454 u32 v, new_idx;
2455
2456 new_idx = closest_thres(&adap->sge, cnt);
2457 if (q->desc && q->pktcnt_idx != new_idx) {
2458 /* the queue has already been created, update it */
2459 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2460 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2461 FW_PARAMS_PARAM_YZ(q->cntxt_id);
060e0c75
DM
2462 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2463 &new_idx);
b8ff05a9
DM
2464 if (err)
2465 return err;
2466 }
2467 q->pktcnt_idx = new_idx;
2468 }
2469
2470 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2471 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2472 return 0;
2473}
2474
2475static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2476{
2477 const struct port_info *pi = netdev_priv(dev);
2478 struct adapter *adap = pi->adapter;
d4fc9dc2
TLSC
2479 struct sge_rspq *q;
2480 int i;
2481 int r = 0;
2482
2483 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2484 q = &adap->sge.ethrxq[i].rspq;
2485 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2486 c->rx_max_coalesced_frames);
2487 if (r) {
2488 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2489 break;
2490 }
2491 }
2492 return r;
b8ff05a9
DM
2493}
2494
2495static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2496{
2497 const struct port_info *pi = netdev_priv(dev);
2498 const struct adapter *adap = pi->adapter;
2499 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2500
2501 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2502 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2503 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2504 return 0;
2505}
2506
1478b3ee
DM
2507/**
2508 * eeprom_ptov - translate a physical EEPROM address to virtual
2509 * @phys_addr: the physical EEPROM address
2510 * @fn: the PCI function number
2511 * @sz: size of function-specific area
2512 *
2513 * Translate a physical EEPROM address to virtual. The first 1K is
2514 * accessed through virtual addresses starting at 31K, the rest is
2515 * accessed through virtual addresses starting at 0.
2516 *
2517 * The mapping is as follows:
2518 * [0..1K) -> [31K..32K)
2519 * [1K..1K+A) -> [31K-A..31K)
2520 * [1K+A..ES) -> [0..ES-A-1K)
2521 *
2522 * where A = @fn * @sz, and ES = EEPROM size.
b8ff05a9 2523 */
1478b3ee 2524static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
b8ff05a9 2525{
1478b3ee 2526 fn *= sz;
b8ff05a9
DM
2527 if (phys_addr < 1024)
2528 return phys_addr + (31 << 10);
1478b3ee
DM
2529 if (phys_addr < 1024 + fn)
2530 return 31744 - fn + phys_addr - 1024;
b8ff05a9 2531 if (phys_addr < EEPROMSIZE)
1478b3ee 2532 return phys_addr - 1024 - fn;
b8ff05a9
DM
2533 return -EINVAL;
2534}
2535
2536/*
2537 * The next two routines implement eeprom read/write from physical addresses.
b8ff05a9
DM
2538 */
2539static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2540{
1478b3ee 2541 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2542
2543 if (vaddr >= 0)
2544 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2545 return vaddr < 0 ? vaddr : 0;
2546}
2547
2548static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2549{
1478b3ee 2550 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2551
2552 if (vaddr >= 0)
2553 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2554 return vaddr < 0 ? vaddr : 0;
2555}
2556
2557#define EEPROM_MAGIC 0x38E2F10C
2558
2559static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2560 u8 *data)
2561{
2562 int i, err = 0;
2563 struct adapter *adapter = netdev2adap(dev);
2564
2565 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2566 if (!buf)
2567 return -ENOMEM;
2568
2569 e->magic = EEPROM_MAGIC;
2570 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2571 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2572
2573 if (!err)
2574 memcpy(data, buf + e->offset, e->len);
2575 kfree(buf);
2576 return err;
2577}
2578
2579static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2580 u8 *data)
2581{
2582 u8 *buf;
2583 int err = 0;
2584 u32 aligned_offset, aligned_len, *p;
2585 struct adapter *adapter = netdev2adap(dev);
2586
2587 if (eeprom->magic != EEPROM_MAGIC)
2588 return -EINVAL;
2589
2590 aligned_offset = eeprom->offset & ~3;
2591 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2592
1478b3ee
DM
2593 if (adapter->fn > 0) {
2594 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2595
2596 if (aligned_offset < start ||
2597 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2598 return -EPERM;
2599 }
2600
b8ff05a9
DM
2601 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2602 /*
2603 * RMW possibly needed for first or last words.
2604 */
2605 buf = kmalloc(aligned_len, GFP_KERNEL);
2606 if (!buf)
2607 return -ENOMEM;
2608 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2609 if (!err && aligned_len > 4)
2610 err = eeprom_rd_phys(adapter,
2611 aligned_offset + aligned_len - 4,
2612 (u32 *)&buf[aligned_len - 4]);
2613 if (err)
2614 goto out;
2615 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2616 } else
2617 buf = data;
2618
2619 err = t4_seeprom_wp(adapter, false);
2620 if (err)
2621 goto out;
2622
2623 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2624 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2625 aligned_offset += 4;
2626 }
2627
2628 if (!err)
2629 err = t4_seeprom_wp(adapter, true);
2630out:
2631 if (buf != data)
2632 kfree(buf);
2633 return err;
2634}
2635
2636static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2637{
2638 int ret;
2639 const struct firmware *fw;
2640 struct adapter *adap = netdev2adap(netdev);
2641
2642 ef->data[sizeof(ef->data) - 1] = '\0';
2643 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2644 if (ret < 0)
2645 return ret;
2646
2647 ret = t4_load_fw(adap, fw->data, fw->size);
2648 release_firmware(fw);
2649 if (!ret)
2650 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2651 return ret;
2652}
2653
2654#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2655#define BCAST_CRC 0xa0ccc1a6
2656
2657static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2658{
2659 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2660 wol->wolopts = netdev2adap(dev)->wol;
2661 memset(&wol->sopass, 0, sizeof(wol->sopass));
2662}
2663
2664static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2665{
2666 int err = 0;
2667 struct port_info *pi = netdev_priv(dev);
2668
2669 if (wol->wolopts & ~WOL_SUPPORTED)
2670 return -EINVAL;
2671 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2672 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2673 if (wol->wolopts & WAKE_BCAST) {
2674 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2675 ~0ULL, 0, false);
2676 if (!err)
2677 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2678 ~6ULL, ~0ULL, BCAST_CRC, true);
2679 } else
2680 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2681 return err;
2682}
2683
c8f44aff 2684static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
87b6cf51 2685{
2ed28baa 2686 const struct port_info *pi = netdev_priv(dev);
c8f44aff 2687 netdev_features_t changed = dev->features ^ features;
19ecae2c 2688 int err;
19ecae2c 2689
f646968f 2690 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2ed28baa 2691 return 0;
19ecae2c 2692
2ed28baa
MM
2693 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2694 -1, -1, -1,
f646968f 2695 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2ed28baa 2696 if (unlikely(err))
f646968f 2697 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
19ecae2c 2698 return err;
87b6cf51
DM
2699}
2700
7850f63f 2701static u32 get_rss_table_size(struct net_device *dev)
671b0060
DM
2702{
2703 const struct port_info *pi = netdev_priv(dev);
671b0060 2704
7850f63f
BH
2705 return pi->rss_size;
2706}
2707
2708static int get_rss_table(struct net_device *dev, u32 *p)
2709{
2710 const struct port_info *pi = netdev_priv(dev);
2711 unsigned int n = pi->rss_size;
2712
671b0060 2713 while (n--)
7850f63f 2714 p[n] = pi->rss[n];
671b0060
DM
2715 return 0;
2716}
2717
7850f63f 2718static int set_rss_table(struct net_device *dev, const u32 *p)
671b0060
DM
2719{
2720 unsigned int i;
2721 struct port_info *pi = netdev_priv(dev);
2722
7850f63f
BH
2723 for (i = 0; i < pi->rss_size; i++)
2724 pi->rss[i] = p[i];
671b0060
DM
2725 if (pi->adapter->flags & FULL_INIT_DONE)
2726 return write_rss(pi, pi->rss);
2727 return 0;
2728}
2729
2730static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 2731 u32 *rules)
671b0060 2732{
f796564a
DM
2733 const struct port_info *pi = netdev_priv(dev);
2734
671b0060 2735 switch (info->cmd) {
f796564a
DM
2736 case ETHTOOL_GRXFH: {
2737 unsigned int v = pi->rss_mode;
2738
2739 info->data = 0;
2740 switch (info->flow_type) {
2741 case TCP_V4_FLOW:
2742 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2743 info->data = RXH_IP_SRC | RXH_IP_DST |
2744 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2745 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2746 info->data = RXH_IP_SRC | RXH_IP_DST;
2747 break;
2748 case UDP_V4_FLOW:
2749 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2750 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2751 info->data = RXH_IP_SRC | RXH_IP_DST |
2752 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2753 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2754 info->data = RXH_IP_SRC | RXH_IP_DST;
2755 break;
2756 case SCTP_V4_FLOW:
2757 case AH_ESP_V4_FLOW:
2758 case IPV4_FLOW:
2759 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2760 info->data = RXH_IP_SRC | RXH_IP_DST;
2761 break;
2762 case TCP_V6_FLOW:
2763 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2764 info->data = RXH_IP_SRC | RXH_IP_DST |
2765 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2766 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2767 info->data = RXH_IP_SRC | RXH_IP_DST;
2768 break;
2769 case UDP_V6_FLOW:
2770 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2771 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2772 info->data = RXH_IP_SRC | RXH_IP_DST |
2773 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2774 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2775 info->data = RXH_IP_SRC | RXH_IP_DST;
2776 break;
2777 case SCTP_V6_FLOW:
2778 case AH_ESP_V6_FLOW:
2779 case IPV6_FLOW:
2780 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2781 info->data = RXH_IP_SRC | RXH_IP_DST;
2782 break;
2783 }
2784 return 0;
2785 }
671b0060 2786 case ETHTOOL_GRXRINGS:
f796564a 2787 info->data = pi->nqsets;
671b0060
DM
2788 return 0;
2789 }
2790 return -EOPNOTSUPP;
2791}
2792
9b07be4b 2793static const struct ethtool_ops cxgb_ethtool_ops = {
b8ff05a9
DM
2794 .get_settings = get_settings,
2795 .set_settings = set_settings,
2796 .get_drvinfo = get_drvinfo,
2797 .get_msglevel = get_msglevel,
2798 .set_msglevel = set_msglevel,
2799 .get_ringparam = get_sge_param,
2800 .set_ringparam = set_sge_param,
2801 .get_coalesce = get_coalesce,
2802 .set_coalesce = set_coalesce,
2803 .get_eeprom_len = get_eeprom_len,
2804 .get_eeprom = get_eeprom,
2805 .set_eeprom = set_eeprom,
2806 .get_pauseparam = get_pauseparam,
2807 .set_pauseparam = set_pauseparam,
b8ff05a9
DM
2808 .get_link = ethtool_op_get_link,
2809 .get_strings = get_strings,
c5e06360 2810 .set_phys_id = identify_port,
b8ff05a9
DM
2811 .nway_reset = restart_autoneg,
2812 .get_sset_count = get_sset_count,
2813 .get_ethtool_stats = get_stats,
2814 .get_regs_len = get_regs_len,
2815 .get_regs = get_regs,
2816 .get_wol = get_wol,
2817 .set_wol = set_wol,
671b0060 2818 .get_rxnfc = get_rxnfc,
7850f63f 2819 .get_rxfh_indir_size = get_rss_table_size,
671b0060
DM
2820 .get_rxfh_indir = get_rss_table,
2821 .set_rxfh_indir = set_rss_table,
b8ff05a9
DM
2822 .flash_device = set_flash,
2823};
2824
2825/*
2826 * debugfs support
2827 */
b8ff05a9
DM
2828static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2829 loff_t *ppos)
2830{
2831 loff_t pos = *ppos;
496ad9aa 2832 loff_t avail = file_inode(file)->i_size;
b8ff05a9
DM
2833 unsigned int mem = (uintptr_t)file->private_data & 3;
2834 struct adapter *adap = file->private_data - mem;
2835
2836 if (pos < 0)
2837 return -EINVAL;
2838 if (pos >= avail)
2839 return 0;
2840 if (count > avail - pos)
2841 count = avail - pos;
2842
2843 while (count) {
2844 size_t len;
2845 int ret, ofst;
2846 __be32 data[16];
2847
19dd37ba
SR
2848 if ((mem == MEM_MC) || (mem == MEM_MC1))
2849 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
b8ff05a9
DM
2850 else
2851 ret = t4_edc_read(adap, mem, pos, data, NULL);
2852 if (ret)
2853 return ret;
2854
2855 ofst = pos % sizeof(data);
2856 len = min(count, sizeof(data) - ofst);
2857 if (copy_to_user(buf, (u8 *)data + ofst, len))
2858 return -EFAULT;
2859
2860 buf += len;
2861 pos += len;
2862 count -= len;
2863 }
2864 count = pos - *ppos;
2865 *ppos = pos;
2866 return count;
2867}
2868
2869static const struct file_operations mem_debugfs_fops = {
2870 .owner = THIS_MODULE,
234e3405 2871 .open = simple_open,
b8ff05a9 2872 .read = mem_read,
6038f373 2873 .llseek = default_llseek,
b8ff05a9
DM
2874};
2875
91744948 2876static void add_debugfs_mem(struct adapter *adap, const char *name,
1dd06ae8 2877 unsigned int idx, unsigned int size_mb)
b8ff05a9
DM
2878{
2879 struct dentry *de;
2880
2881 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2882 (void *)adap + idx, &mem_debugfs_fops);
2883 if (de && de->d_inode)
2884 de->d_inode->i_size = size_mb << 20;
2885}
2886
91744948 2887static int setup_debugfs(struct adapter *adap)
b8ff05a9
DM
2888{
2889 int i;
19dd37ba 2890 u32 size;
b8ff05a9
DM
2891
2892 if (IS_ERR_OR_NULL(adap->debugfs_root))
2893 return -1;
2894
2895 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
19dd37ba
SR
2896 if (i & EDRAM0_ENABLE) {
2897 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2898 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2899 }
2900 if (i & EDRAM1_ENABLE) {
2901 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2902 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2903 }
d14807dd 2904 if (is_t4(adap->params.chip)) {
19dd37ba
SR
2905 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2906 if (i & EXT_MEM_ENABLE)
2907 add_debugfs_mem(adap, "mc", MEM_MC,
2908 EXT_MEM_SIZE_GET(size));
2909 } else {
2910 if (i & EXT_MEM_ENABLE) {
2911 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2912 add_debugfs_mem(adap, "mc0", MEM_MC0,
2913 EXT_MEM_SIZE_GET(size));
2914 }
2915 if (i & EXT_MEM1_ENABLE) {
2916 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2917 add_debugfs_mem(adap, "mc1", MEM_MC1,
2918 EXT_MEM_SIZE_GET(size));
2919 }
2920 }
b8ff05a9
DM
2921 if (adap->l2t)
2922 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2923 &t4_l2t_fops);
2924 return 0;
2925}
2926
2927/*
2928 * upper-layer driver support
2929 */
2930
2931/*
2932 * Allocate an active-open TID and set it to the supplied value.
2933 */
2934int cxgb4_alloc_atid(struct tid_info *t, void *data)
2935{
2936 int atid = -1;
2937
2938 spin_lock_bh(&t->atid_lock);
2939 if (t->afree) {
2940 union aopen_entry *p = t->afree;
2941
f2b7e78d 2942 atid = (p - t->atid_tab) + t->atid_base;
b8ff05a9
DM
2943 t->afree = p->next;
2944 p->data = data;
2945 t->atids_in_use++;
2946 }
2947 spin_unlock_bh(&t->atid_lock);
2948 return atid;
2949}
2950EXPORT_SYMBOL(cxgb4_alloc_atid);
2951
2952/*
2953 * Release an active-open TID.
2954 */
2955void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2956{
f2b7e78d 2957 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
b8ff05a9
DM
2958
2959 spin_lock_bh(&t->atid_lock);
2960 p->next = t->afree;
2961 t->afree = p;
2962 t->atids_in_use--;
2963 spin_unlock_bh(&t->atid_lock);
2964}
2965EXPORT_SYMBOL(cxgb4_free_atid);
2966
2967/*
2968 * Allocate a server TID and set it to the supplied value.
2969 */
2970int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2971{
2972 int stid;
2973
2974 spin_lock_bh(&t->stid_lock);
2975 if (family == PF_INET) {
2976 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2977 if (stid < t->nstids)
2978 __set_bit(stid, t->stid_bmap);
2979 else
2980 stid = -1;
2981 } else {
2982 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2983 if (stid < 0)
2984 stid = -1;
2985 }
2986 if (stid >= 0) {
2987 t->stid_tab[stid].data = data;
2988 stid += t->stid_base;
2989 t->stids_in_use++;
2990 }
2991 spin_unlock_bh(&t->stid_lock);
2992 return stid;
2993}
2994EXPORT_SYMBOL(cxgb4_alloc_stid);
2995
dca4faeb
VP
2996/* Allocate a server filter TID and set it to the supplied value.
2997 */
2998int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2999{
3000 int stid;
3001
3002 spin_lock_bh(&t->stid_lock);
3003 if (family == PF_INET) {
3004 stid = find_next_zero_bit(t->stid_bmap,
3005 t->nstids + t->nsftids, t->nstids);
3006 if (stid < (t->nstids + t->nsftids))
3007 __set_bit(stid, t->stid_bmap);
3008 else
3009 stid = -1;
3010 } else {
3011 stid = -1;
3012 }
3013 if (stid >= 0) {
3014 t->stid_tab[stid].data = data;
470c60c4
KS
3015 stid -= t->nstids;
3016 stid += t->sftid_base;
dca4faeb
VP
3017 t->stids_in_use++;
3018 }
3019 spin_unlock_bh(&t->stid_lock);
3020 return stid;
3021}
3022EXPORT_SYMBOL(cxgb4_alloc_sftid);
3023
3024/* Release a server TID.
b8ff05a9
DM
3025 */
3026void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3027{
470c60c4
KS
3028 /* Is it a server filter TID? */
3029 if (t->nsftids && (stid >= t->sftid_base)) {
3030 stid -= t->sftid_base;
3031 stid += t->nstids;
3032 } else {
3033 stid -= t->stid_base;
3034 }
3035
b8ff05a9
DM
3036 spin_lock_bh(&t->stid_lock);
3037 if (family == PF_INET)
3038 __clear_bit(stid, t->stid_bmap);
3039 else
3040 bitmap_release_region(t->stid_bmap, stid, 2);
3041 t->stid_tab[stid].data = NULL;
3042 t->stids_in_use--;
3043 spin_unlock_bh(&t->stid_lock);
3044}
3045EXPORT_SYMBOL(cxgb4_free_stid);
3046
3047/*
3048 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3049 */
3050static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3051 unsigned int tid)
3052{
3053 struct cpl_tid_release *req;
3054
3055 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3056 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3057 INIT_TP_WR(req, tid);
3058 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3059}
3060
3061/*
3062 * Queue a TID release request and if necessary schedule a work queue to
3063 * process it.
3064 */
31b9c19b 3065static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3066 unsigned int tid)
b8ff05a9
DM
3067{
3068 void **p = &t->tid_tab[tid];
3069 struct adapter *adap = container_of(t, struct adapter, tids);
3070
3071 spin_lock_bh(&adap->tid_release_lock);
3072 *p = adap->tid_release_head;
3073 /* Low 2 bits encode the Tx channel number */
3074 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3075 if (!adap->tid_release_task_busy) {
3076 adap->tid_release_task_busy = true;
3069ee9b 3077 queue_work(workq, &adap->tid_release_task);
b8ff05a9
DM
3078 }
3079 spin_unlock_bh(&adap->tid_release_lock);
3080}
b8ff05a9
DM
3081
3082/*
3083 * Process the list of pending TID release requests.
3084 */
3085static void process_tid_release_list(struct work_struct *work)
3086{
3087 struct sk_buff *skb;
3088 struct adapter *adap;
3089
3090 adap = container_of(work, struct adapter, tid_release_task);
3091
3092 spin_lock_bh(&adap->tid_release_lock);
3093 while (adap->tid_release_head) {
3094 void **p = adap->tid_release_head;
3095 unsigned int chan = (uintptr_t)p & 3;
3096 p = (void *)p - chan;
3097
3098 adap->tid_release_head = *p;
3099 *p = NULL;
3100 spin_unlock_bh(&adap->tid_release_lock);
3101
3102 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3103 GFP_KERNEL)))
3104 schedule_timeout_uninterruptible(1);
3105
3106 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3107 t4_ofld_send(adap, skb);
3108 spin_lock_bh(&adap->tid_release_lock);
3109 }
3110 adap->tid_release_task_busy = false;
3111 spin_unlock_bh(&adap->tid_release_lock);
3112}
3113
3114/*
3115 * Release a TID and inform HW. If we are unable to allocate the release
3116 * message we defer to a work queue.
3117 */
3118void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3119{
3120 void *old;
3121 struct sk_buff *skb;
3122 struct adapter *adap = container_of(t, struct adapter, tids);
3123
3124 old = t->tid_tab[tid];
3125 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3126 if (likely(skb)) {
3127 t->tid_tab[tid] = NULL;
3128 mk_tid_release(skb, chan, tid);
3129 t4_ofld_send(adap, skb);
3130 } else
3131 cxgb4_queue_tid_release(t, chan, tid);
3132 if (old)
3133 atomic_dec(&t->tids_in_use);
3134}
3135EXPORT_SYMBOL(cxgb4_remove_tid);
3136
3137/*
3138 * Allocate and initialize the TID tables. Returns 0 on success.
3139 */
3140static int tid_init(struct tid_info *t)
3141{
3142 size_t size;
f2b7e78d 3143 unsigned int stid_bmap_size;
b8ff05a9 3144 unsigned int natids = t->natids;
b6f8eaec 3145 struct adapter *adap = container_of(t, struct adapter, tids);
b8ff05a9 3146
dca4faeb 3147 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
f2b7e78d
VP
3148 size = t->ntids * sizeof(*t->tid_tab) +
3149 natids * sizeof(*t->atid_tab) +
b8ff05a9 3150 t->nstids * sizeof(*t->stid_tab) +
dca4faeb 3151 t->nsftids * sizeof(*t->stid_tab) +
f2b7e78d 3152 stid_bmap_size * sizeof(long) +
dca4faeb
VP
3153 t->nftids * sizeof(*t->ftid_tab) +
3154 t->nsftids * sizeof(*t->ftid_tab);
f2b7e78d 3155
b8ff05a9
DM
3156 t->tid_tab = t4_alloc_mem(size);
3157 if (!t->tid_tab)
3158 return -ENOMEM;
3159
3160 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3161 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 3162 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
f2b7e78d 3163 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
b8ff05a9
DM
3164 spin_lock_init(&t->stid_lock);
3165 spin_lock_init(&t->atid_lock);
3166
3167 t->stids_in_use = 0;
3168 t->afree = NULL;
3169 t->atids_in_use = 0;
3170 atomic_set(&t->tids_in_use, 0);
3171
3172 /* Setup the free list for atid_tab and clear the stid bitmap. */
3173 if (natids) {
3174 while (--natids)
3175 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3176 t->afree = t->atid_tab;
3177 }
dca4faeb 3178 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
b6f8eaec
KS
3179 /* Reserve stid 0 for T4/T5 adapters */
3180 if (!t->stid_base &&
3181 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3182 __set_bit(0, t->stid_bmap);
3183
b8ff05a9
DM
3184 return 0;
3185}
3186
01bcca68
VP
3187static int cxgb4_clip_get(const struct net_device *dev,
3188 const struct in6_addr *lip)
3189{
3190 struct adapter *adap;
3191 struct fw_clip_cmd c;
3192
3193 adap = netdev2adap(dev);
3194 memset(&c, 0, sizeof(c));
3195 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3196 FW_CMD_REQUEST | FW_CMD_WRITE);
3197 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3198 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3199 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3200 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3201}
3202
3203static int cxgb4_clip_release(const struct net_device *dev,
3204 const struct in6_addr *lip)
3205{
3206 struct adapter *adap;
3207 struct fw_clip_cmd c;
3208
3209 adap = netdev2adap(dev);
3210 memset(&c, 0, sizeof(c));
3211 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3212 FW_CMD_REQUEST | FW_CMD_READ);
3213 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3214 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3215 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3216 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3217}
3218
b8ff05a9
DM
3219/**
3220 * cxgb4_create_server - create an IP server
3221 * @dev: the device
3222 * @stid: the server TID
3223 * @sip: local IP address to bind server to
3224 * @sport: the server's TCP port
3225 * @queue: queue to direct messages from this server to
3226 *
3227 * Create an IP server for the given port and address.
3228 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3229 */
3230int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
793dad94
VP
3231 __be32 sip, __be16 sport, __be16 vlan,
3232 unsigned int queue)
b8ff05a9
DM
3233{
3234 unsigned int chan;
3235 struct sk_buff *skb;
3236 struct adapter *adap;
3237 struct cpl_pass_open_req *req;
80f40c1f 3238 int ret;
b8ff05a9
DM
3239
3240 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3241 if (!skb)
3242 return -ENOMEM;
3243
3244 adap = netdev2adap(dev);
3245 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3246 INIT_TP_WR(req, 0);
3247 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3248 req->local_port = sport;
3249 req->peer_port = htons(0);
3250 req->local_ip = sip;
3251 req->peer_ip = htonl(0);
e46dab4d 3252 chan = rxq_to_chan(&adap->sge, queue);
b8ff05a9
DM
3253 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3254 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3255 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
80f40c1f
VP
3256 ret = t4_mgmt_tx(adap, skb);
3257 return net_xmit_eval(ret);
b8ff05a9
DM
3258}
3259EXPORT_SYMBOL(cxgb4_create_server);
3260
80f40c1f
VP
3261/* cxgb4_create_server6 - create an IPv6 server
3262 * @dev: the device
3263 * @stid: the server TID
3264 * @sip: local IPv6 address to bind server to
3265 * @sport: the server's TCP port
3266 * @queue: queue to direct messages from this server to
3267 *
3268 * Create an IPv6 server for the given port and address.
3269 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3270 */
3271int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3272 const struct in6_addr *sip, __be16 sport,
3273 unsigned int queue)
3274{
3275 unsigned int chan;
3276 struct sk_buff *skb;
3277 struct adapter *adap;
3278 struct cpl_pass_open_req6 *req;
3279 int ret;
3280
3281 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3282 if (!skb)
3283 return -ENOMEM;
3284
3285 adap = netdev2adap(dev);
3286 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3287 INIT_TP_WR(req, 0);
3288 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3289 req->local_port = sport;
3290 req->peer_port = htons(0);
3291 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3292 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3293 req->peer_ip_hi = cpu_to_be64(0);
3294 req->peer_ip_lo = cpu_to_be64(0);
3295 chan = rxq_to_chan(&adap->sge, queue);
3296 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3297 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3298 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3299 ret = t4_mgmt_tx(adap, skb);
3300 return net_xmit_eval(ret);
3301}
3302EXPORT_SYMBOL(cxgb4_create_server6);
3303
3304int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3305 unsigned int queue, bool ipv6)
3306{
3307 struct sk_buff *skb;
3308 struct adapter *adap;
3309 struct cpl_close_listsvr_req *req;
3310 int ret;
3311
3312 adap = netdev2adap(dev);
3313
3314 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3315 if (!skb)
3316 return -ENOMEM;
3317
3318 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3319 INIT_TP_WR(req, 0);
3320 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3321 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3322 LISTSVR_IPV6(0)) | QUEUENO(queue));
3323 ret = t4_mgmt_tx(adap, skb);
3324 return net_xmit_eval(ret);
3325}
3326EXPORT_SYMBOL(cxgb4_remove_server);
3327
b8ff05a9
DM
3328/**
3329 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3330 * @mtus: the HW MTU table
3331 * @mtu: the target MTU
3332 * @idx: index of selected entry in the MTU table
3333 *
3334 * Returns the index and the value in the HW MTU table that is closest to
3335 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3336 * table, in which case that smallest available value is selected.
3337 */
3338unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3339 unsigned int *idx)
3340{
3341 unsigned int i = 0;
3342
3343 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3344 ++i;
3345 if (idx)
3346 *idx = i;
3347 return mtus[i];
3348}
3349EXPORT_SYMBOL(cxgb4_best_mtu);
3350
3351/**
3352 * cxgb4_port_chan - get the HW channel of a port
3353 * @dev: the net device for the port
3354 *
3355 * Return the HW Tx channel of the given port.
3356 */
3357unsigned int cxgb4_port_chan(const struct net_device *dev)
3358{
3359 return netdev2pinfo(dev)->tx_chan;
3360}
3361EXPORT_SYMBOL(cxgb4_port_chan);
3362
881806bc
VP
3363unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3364{
3365 struct adapter *adap = netdev2adap(dev);
2cc301d2 3366 u32 v1, v2, lp_count, hp_count;
881806bc 3367
2cc301d2
SR
3368 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3369 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
d14807dd 3370 if (is_t4(adap->params.chip)) {
2cc301d2
SR
3371 lp_count = G_LP_COUNT(v1);
3372 hp_count = G_HP_COUNT(v1);
3373 } else {
3374 lp_count = G_LP_COUNT_T5(v1);
3375 hp_count = G_HP_COUNT_T5(v2);
3376 }
3377 return lpfifo ? lp_count : hp_count;
881806bc
VP
3378}
3379EXPORT_SYMBOL(cxgb4_dbfifo_count);
3380
b8ff05a9
DM
3381/**
3382 * cxgb4_port_viid - get the VI id of a port
3383 * @dev: the net device for the port
3384 *
3385 * Return the VI id of the given port.
3386 */
3387unsigned int cxgb4_port_viid(const struct net_device *dev)
3388{
3389 return netdev2pinfo(dev)->viid;
3390}
3391EXPORT_SYMBOL(cxgb4_port_viid);
3392
3393/**
3394 * cxgb4_port_idx - get the index of a port
3395 * @dev: the net device for the port
3396 *
3397 * Return the index of the given port.
3398 */
3399unsigned int cxgb4_port_idx(const struct net_device *dev)
3400{
3401 return netdev2pinfo(dev)->port_id;
3402}
3403EXPORT_SYMBOL(cxgb4_port_idx);
3404
b8ff05a9
DM
3405void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3406 struct tp_tcp_stats *v6)
3407{
3408 struct adapter *adap = pci_get_drvdata(pdev);
3409
3410 spin_lock(&adap->stats_lock);
3411 t4_tp_get_tcp_stats(adap, v4, v6);
3412 spin_unlock(&adap->stats_lock);
3413}
3414EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3415
3416void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3417 const unsigned int *pgsz_order)
3418{
3419 struct adapter *adap = netdev2adap(dev);
3420
3421 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3422 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3423 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3424 HPZ3(pgsz_order[3]));
3425}
3426EXPORT_SYMBOL(cxgb4_iscsi_init);
3427
3069ee9b
VP
3428int cxgb4_flush_eq_cache(struct net_device *dev)
3429{
3430 struct adapter *adap = netdev2adap(dev);
3431 int ret;
3432
3433 ret = t4_fwaddrspace_write(adap, adap->mbox,
3434 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3435 return ret;
3436}
3437EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3438
3439static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3440{
3441 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3442 __be64 indices;
3443 int ret;
3444
3445 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3446 if (!ret) {
404d9e3f
VP
3447 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3448 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
3449 }
3450 return ret;
3451}
3452
3453int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3454 u16 size)
3455{
3456 struct adapter *adap = netdev2adap(dev);
3457 u16 hw_pidx, hw_cidx;
3458 int ret;
3459
3460 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3461 if (ret)
3462 goto out;
3463
3464 if (pidx != hw_pidx) {
3465 u16 delta;
3466
3467 if (pidx >= hw_pidx)
3468 delta = pidx - hw_pidx;
3469 else
3470 delta = size - hw_pidx + pidx;
3471 wmb();
840f3000
VP
3472 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3473 QID(qid) | PIDX(delta));
3069ee9b
VP
3474 }
3475out:
3476 return ret;
3477}
3478EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3479
3cbdb928
VP
3480void cxgb4_disable_db_coalescing(struct net_device *dev)
3481{
3482 struct adapter *adap;
3483
3484 adap = netdev2adap(dev);
3485 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3486 F_NOCOALESCE);
3487}
3488EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3489
3490void cxgb4_enable_db_coalescing(struct net_device *dev)
3491{
3492 struct adapter *adap;
3493
3494 adap = netdev2adap(dev);
3495 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3496}
3497EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3498
b8ff05a9
DM
3499static struct pci_driver cxgb4_driver;
3500
3501static void check_neigh_update(struct neighbour *neigh)
3502{
3503 const struct device *parent;
3504 const struct net_device *netdev = neigh->dev;
3505
3506 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3507 netdev = vlan_dev_real_dev(netdev);
3508 parent = netdev->dev.parent;
3509 if (parent && parent->driver == &cxgb4_driver.driver)
3510 t4_l2t_update(dev_get_drvdata(parent), neigh);
3511}
3512
3513static int netevent_cb(struct notifier_block *nb, unsigned long event,
3514 void *data)
3515{
3516 switch (event) {
3517 case NETEVENT_NEIGH_UPDATE:
3518 check_neigh_update(data);
3519 break;
b8ff05a9
DM
3520 case NETEVENT_REDIRECT:
3521 default:
3522 break;
3523 }
3524 return 0;
3525}
3526
3527static bool netevent_registered;
3528static struct notifier_block cxgb4_netevent_nb = {
3529 .notifier_call = netevent_cb
3530};
3531
3069ee9b
VP
3532static void drain_db_fifo(struct adapter *adap, int usecs)
3533{
2cc301d2 3534 u32 v1, v2, lp_count, hp_count;
3069ee9b
VP
3535
3536 do {
2cc301d2
SR
3537 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3538 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
d14807dd 3539 if (is_t4(adap->params.chip)) {
2cc301d2
SR
3540 lp_count = G_LP_COUNT(v1);
3541 hp_count = G_HP_COUNT(v1);
3542 } else {
3543 lp_count = G_LP_COUNT_T5(v1);
3544 hp_count = G_HP_COUNT_T5(v2);
3545 }
3546
3547 if (lp_count == 0 && hp_count == 0)
3548 break;
3069ee9b
VP
3549 set_current_state(TASK_UNINTERRUPTIBLE);
3550 schedule_timeout(usecs_to_jiffies(usecs));
3069ee9b
VP
3551 } while (1);
3552}
3553
3554static void disable_txq_db(struct sge_txq *q)
3555{
3556 spin_lock_irq(&q->db_lock);
3557 q->db_disabled = 1;
3558 spin_unlock_irq(&q->db_lock);
3559}
3560
3561static void enable_txq_db(struct sge_txq *q)
3562{
3563 spin_lock_irq(&q->db_lock);
3564 q->db_disabled = 0;
3565 spin_unlock_irq(&q->db_lock);
3566}
3567
3568static void disable_dbs(struct adapter *adap)
3569{
3570 int i;
3571
3572 for_each_ethrxq(&adap->sge, i)
3573 disable_txq_db(&adap->sge.ethtxq[i].q);
3574 for_each_ofldrxq(&adap->sge, i)
3575 disable_txq_db(&adap->sge.ofldtxq[i].q);
3576 for_each_port(adap, i)
3577 disable_txq_db(&adap->sge.ctrlq[i].q);
3578}
3579
3580static void enable_dbs(struct adapter *adap)
3581{
3582 int i;
3583
3584 for_each_ethrxq(&adap->sge, i)
3585 enable_txq_db(&adap->sge.ethtxq[i].q);
3586 for_each_ofldrxq(&adap->sge, i)
3587 enable_txq_db(&adap->sge.ofldtxq[i].q);
3588 for_each_port(adap, i)
3589 enable_txq_db(&adap->sge.ctrlq[i].q);
3590}
3591
3592static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3593{
3594 u16 hw_pidx, hw_cidx;
3595 int ret;
3596
3597 spin_lock_bh(&q->db_lock);
3598 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3599 if (ret)
3600 goto out;
3601 if (q->db_pidx != hw_pidx) {
3602 u16 delta;
3603
3604 if (q->db_pidx >= hw_pidx)
3605 delta = q->db_pidx - hw_pidx;
3606 else
3607 delta = q->size - hw_pidx + q->db_pidx;
3608 wmb();
840f3000
VP
3609 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3610 QID(q->cntxt_id) | PIDX(delta));
3069ee9b
VP
3611 }
3612out:
3613 q->db_disabled = 0;
3614 spin_unlock_bh(&q->db_lock);
3615 if (ret)
3616 CH_WARN(adap, "DB drop recovery failed.\n");
3617}
3618static void recover_all_queues(struct adapter *adap)
3619{
3620 int i;
3621
3622 for_each_ethrxq(&adap->sge, i)
3623 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3624 for_each_ofldrxq(&adap->sge, i)
3625 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3626 for_each_port(adap, i)
3627 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3628}
3629
881806bc
VP
3630static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3631{
3632 mutex_lock(&uld_mutex);
3633 if (adap->uld_handle[CXGB4_ULD_RDMA])
3634 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3635 cmd);
3636 mutex_unlock(&uld_mutex);
3637}
3638
3639static void process_db_full(struct work_struct *work)
3640{
3641 struct adapter *adap;
881806bc
VP
3642
3643 adap = container_of(work, struct adapter, db_full_task);
3644
881806bc 3645 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3069ee9b 3646 drain_db_fifo(adap, dbfifo_drain_delay);
840f3000
VP
3647 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3648 DBFIFO_HP_INT | DBFIFO_LP_INT,
3649 DBFIFO_HP_INT | DBFIFO_LP_INT);
881806bc 3650 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
881806bc
VP
3651}
3652
3653static void process_db_drop(struct work_struct *work)
3654{
3655 struct adapter *adap;
881806bc 3656
3069ee9b 3657 adap = container_of(work, struct adapter, db_drop_task);
881806bc 3658
d14807dd 3659 if (is_t4(adap->params.chip)) {
2cc301d2
SR
3660 disable_dbs(adap);
3661 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3662 drain_db_fifo(adap, 1);
3663 recover_all_queues(adap);
3664 enable_dbs(adap);
3665 } else {
3666 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3667 u16 qid = (dropped_db >> 15) & 0x1ffff;
3668 u16 pidx_inc = dropped_db & 0x1fff;
3669 unsigned int s_qpp;
3670 unsigned short udb_density;
3671 unsigned long qpshift;
3672 int page;
3673 u32 udb;
3674
3675 dev_warn(adap->pdev_dev,
3676 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3677 dropped_db, qid,
3678 (dropped_db >> 14) & 1,
3679 (dropped_db >> 13) & 1,
3680 pidx_inc);
3681
3682 drain_db_fifo(adap, 1);
3683
3684 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3685 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3686 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3687 qpshift = PAGE_SHIFT - ilog2(udb_density);
3688 udb = qid << qpshift;
3689 udb &= PAGE_MASK;
3690 page = udb / PAGE_SIZE;
3691 udb += (qid - (page * udb_density)) * 128;
3692
3693 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3694
3695 /* Re-enable BAR2 WC */
3696 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3697 }
3698
3069ee9b 3699 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
881806bc
VP
3700}
3701
3702void t4_db_full(struct adapter *adap)
3703{
d14807dd 3704 if (is_t4(adap->params.chip)) {
2cc301d2
SR
3705 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3706 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3707 queue_work(workq, &adap->db_full_task);
3708 }
881806bc
VP
3709}
3710
3711void t4_db_dropped(struct adapter *adap)
3712{
d14807dd 3713 if (is_t4(adap->params.chip))
2cc301d2 3714 queue_work(workq, &adap->db_drop_task);
881806bc
VP
3715}
3716
b8ff05a9
DM
3717static void uld_attach(struct adapter *adap, unsigned int uld)
3718{
3719 void *handle;
3720 struct cxgb4_lld_info lli;
dca4faeb 3721 unsigned short i;
b8ff05a9
DM
3722
3723 lli.pdev = adap->pdev;
3724 lli.l2t = adap->l2t;
3725 lli.tids = &adap->tids;
3726 lli.ports = adap->port;
3727 lli.vr = &adap->vres;
3728 lli.mtus = adap->params.mtus;
3729 if (uld == CXGB4_ULD_RDMA) {
3730 lli.rxq_ids = adap->sge.rdma_rxq;
3731 lli.nrxq = adap->sge.rdmaqs;
3732 } else if (uld == CXGB4_ULD_ISCSI) {
3733 lli.rxq_ids = adap->sge.ofld_rxq;
3734 lli.nrxq = adap->sge.ofldqsets;
3735 }
3736 lli.ntxq = adap->sge.ofldqsets;
3737 lli.nchan = adap->params.nports;
3738 lli.nports = adap->params.nports;
3739 lli.wr_cred = adap->params.ofldq_wr_cred;
d14807dd 3740 lli.adapter_type = adap->params.chip;
b8ff05a9
DM
3741 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3742 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
3743 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3744 (adap->fn * 4));
b8ff05a9 3745 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
3746 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3747 (adap->fn * 4));
793dad94 3748 lli.filt_mode = adap->filter_mode;
dca4faeb
VP
3749 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3750 for (i = 0; i < NCHAN; i++)
3751 lli.tx_modq[i] = i;
b8ff05a9
DM
3752 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3753 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3754 lli.fw_vers = adap->params.fw_vers;
3069ee9b 3755 lli.dbfifo_int_thresh = dbfifo_int_thresh;
dca4faeb
VP
3756 lli.sge_pktshift = adap->sge.pktshift;
3757 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
b8ff05a9
DM
3758
3759 handle = ulds[uld].add(&lli);
3760 if (IS_ERR(handle)) {
3761 dev_warn(adap->pdev_dev,
3762 "could not attach to the %s driver, error %ld\n",
3763 uld_str[uld], PTR_ERR(handle));
3764 return;
3765 }
3766
3767 adap->uld_handle[uld] = handle;
3768
3769 if (!netevent_registered) {
3770 register_netevent_notifier(&cxgb4_netevent_nb);
3771 netevent_registered = true;
3772 }
e29f5dbc
DM
3773
3774 if (adap->flags & FULL_INIT_DONE)
3775 ulds[uld].state_change(handle, CXGB4_STATE_UP);
b8ff05a9
DM
3776}
3777
3778static void attach_ulds(struct adapter *adap)
3779{
3780 unsigned int i;
3781
01bcca68
VP
3782 spin_lock(&adap_rcu_lock);
3783 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3784 spin_unlock(&adap_rcu_lock);
3785
b8ff05a9
DM
3786 mutex_lock(&uld_mutex);
3787 list_add_tail(&adap->list_node, &adapter_list);
3788 for (i = 0; i < CXGB4_ULD_MAX; i++)
3789 if (ulds[i].add)
3790 uld_attach(adap, i);
3791 mutex_unlock(&uld_mutex);
3792}
3793
3794static void detach_ulds(struct adapter *adap)
3795{
3796 unsigned int i;
3797
3798 mutex_lock(&uld_mutex);
3799 list_del(&adap->list_node);
3800 for (i = 0; i < CXGB4_ULD_MAX; i++)
3801 if (adap->uld_handle[i]) {
3802 ulds[i].state_change(adap->uld_handle[i],
3803 CXGB4_STATE_DETACH);
3804 adap->uld_handle[i] = NULL;
3805 }
3806 if (netevent_registered && list_empty(&adapter_list)) {
3807 unregister_netevent_notifier(&cxgb4_netevent_nb);
3808 netevent_registered = false;
3809 }
3810 mutex_unlock(&uld_mutex);
01bcca68
VP
3811
3812 spin_lock(&adap_rcu_lock);
3813 list_del_rcu(&adap->rcu_node);
3814 spin_unlock(&adap_rcu_lock);
b8ff05a9
DM
3815}
3816
3817static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3818{
3819 unsigned int i;
3820
3821 mutex_lock(&uld_mutex);
3822 for (i = 0; i < CXGB4_ULD_MAX; i++)
3823 if (adap->uld_handle[i])
3824 ulds[i].state_change(adap->uld_handle[i], new_state);
3825 mutex_unlock(&uld_mutex);
3826}
3827
3828/**
3829 * cxgb4_register_uld - register an upper-layer driver
3830 * @type: the ULD type
3831 * @p: the ULD methods
3832 *
3833 * Registers an upper-layer driver with this driver and notifies the ULD
3834 * about any presently available devices that support its type. Returns
3835 * %-EBUSY if a ULD of the same type is already registered.
3836 */
3837int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3838{
3839 int ret = 0;
3840 struct adapter *adap;
3841
3842 if (type >= CXGB4_ULD_MAX)
3843 return -EINVAL;
3844 mutex_lock(&uld_mutex);
3845 if (ulds[type].add) {
3846 ret = -EBUSY;
3847 goto out;
3848 }
3849 ulds[type] = *p;
3850 list_for_each_entry(adap, &adapter_list, list_node)
3851 uld_attach(adap, type);
3852out: mutex_unlock(&uld_mutex);
3853 return ret;
3854}
3855EXPORT_SYMBOL(cxgb4_register_uld);
3856
3857/**
3858 * cxgb4_unregister_uld - unregister an upper-layer driver
3859 * @type: the ULD type
3860 *
3861 * Unregisters an existing upper-layer driver.
3862 */
3863int cxgb4_unregister_uld(enum cxgb4_uld type)
3864{
3865 struct adapter *adap;
3866
3867 if (type >= CXGB4_ULD_MAX)
3868 return -EINVAL;
3869 mutex_lock(&uld_mutex);
3870 list_for_each_entry(adap, &adapter_list, list_node)
3871 adap->uld_handle[type] = NULL;
3872 ulds[type].add = NULL;
3873 mutex_unlock(&uld_mutex);
3874 return 0;
3875}
3876EXPORT_SYMBOL(cxgb4_unregister_uld);
3877
01bcca68
VP
3878/* Check if netdev on which event is occured belongs to us or not. Return
3879 * suceess (1) if it belongs otherwise failure (0).
3880 */
3881static int cxgb4_netdev(struct net_device *netdev)
3882{
3883 struct adapter *adap;
3884 int i;
3885
3886 spin_lock(&adap_rcu_lock);
3887 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
3888 for (i = 0; i < MAX_NPORTS; i++)
3889 if (adap->port[i] == netdev) {
3890 spin_unlock(&adap_rcu_lock);
3891 return 1;
3892 }
3893 spin_unlock(&adap_rcu_lock);
3894 return 0;
3895}
3896
3897static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
3898 unsigned long event)
3899{
3900 int ret = NOTIFY_DONE;
3901
3902 rcu_read_lock();
3903 if (cxgb4_netdev(event_dev)) {
3904 switch (event) {
3905 case NETDEV_UP:
3906 ret = cxgb4_clip_get(event_dev,
3907 (const struct in6_addr *)ifa->addr.s6_addr);
3908 if (ret < 0) {
3909 rcu_read_unlock();
3910 return ret;
3911 }
3912 ret = NOTIFY_OK;
3913 break;
3914 case NETDEV_DOWN:
3915 cxgb4_clip_release(event_dev,
3916 (const struct in6_addr *)ifa->addr.s6_addr);
3917 ret = NOTIFY_OK;
3918 break;
3919 default:
3920 break;
3921 }
3922 }
3923 rcu_read_unlock();
3924 return ret;
3925}
3926
3927static int cxgb4_inet6addr_handler(struct notifier_block *this,
3928 unsigned long event, void *data)
3929{
3930 struct inet6_ifaddr *ifa = data;
3931 struct net_device *event_dev;
3932 int ret = NOTIFY_DONE;
01bcca68 3933 struct bonding *bond = netdev_priv(ifa->idev->dev);
9caff1e7 3934 struct list_head *iter;
01bcca68
VP
3935 struct slave *slave;
3936 struct pci_dev *first_pdev = NULL;
3937
3938 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
3939 event_dev = vlan_dev_real_dev(ifa->idev->dev);
3940 ret = clip_add(event_dev, ifa, event);
3941 } else if (ifa->idev->dev->flags & IFF_MASTER) {
3942 /* It is possible that two different adapters are bonded in one
3943 * bond. We need to find such different adapters and add clip
3944 * in all of them only once.
3945 */
3946 read_lock(&bond->lock);
9caff1e7 3947 bond_for_each_slave(bond, slave, iter) {
01bcca68
VP
3948 if (!first_pdev) {
3949 ret = clip_add(slave->dev, ifa, event);
3950 /* If clip_add is success then only initialize
3951 * first_pdev since it means it is our device
3952 */
3953 if (ret == NOTIFY_OK)
3954 first_pdev = to_pci_dev(
3955 slave->dev->dev.parent);
3956 } else if (first_pdev !=
3957 to_pci_dev(slave->dev->dev.parent))
3958 ret = clip_add(slave->dev, ifa, event);
3959 }
3960 read_unlock(&bond->lock);
3961 } else
3962 ret = clip_add(ifa->idev->dev, ifa, event);
3963
3964 return ret;
3965}
3966
3967static struct notifier_block cxgb4_inet6addr_notifier = {
3968 .notifier_call = cxgb4_inet6addr_handler
3969};
3970
3971/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
3972 * a physical device.
3973 * The physical device reference is needed to send the actul CLIP command.
3974 */
3975static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
3976{
3977 struct inet6_dev *idev = NULL;
3978 struct inet6_ifaddr *ifa;
3979 int ret = 0;
3980
3981 idev = __in6_dev_get(root_dev);
3982 if (!idev)
3983 return ret;
3984
3985 read_lock_bh(&idev->lock);
3986 list_for_each_entry(ifa, &idev->addr_list, if_list) {
3987 ret = cxgb4_clip_get(dev,
3988 (const struct in6_addr *)ifa->addr.s6_addr);
3989 if (ret < 0)
3990 break;
3991 }
3992 read_unlock_bh(&idev->lock);
3993
3994 return ret;
3995}
3996
3997static int update_root_dev_clip(struct net_device *dev)
3998{
3999 struct net_device *root_dev = NULL;
4000 int i, ret = 0;
4001
4002 /* First populate the real net device's IPv6 addresses */
4003 ret = update_dev_clip(dev, dev);
4004 if (ret)
4005 return ret;
4006
4007 /* Parse all bond and vlan devices layered on top of the physical dev */
4008 for (i = 0; i < VLAN_N_VID; i++) {
4009 root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
4010 if (!root_dev)
4011 continue;
4012
4013 ret = update_dev_clip(root_dev, dev);
4014 if (ret)
4015 break;
4016 }
4017 return ret;
4018}
4019
4020static void update_clip(const struct adapter *adap)
4021{
4022 int i;
4023 struct net_device *dev;
4024 int ret;
4025
4026 rcu_read_lock();
4027
4028 for (i = 0; i < MAX_NPORTS; i++) {
4029 dev = adap->port[i];
4030 ret = 0;
4031
4032 if (dev)
4033 ret = update_root_dev_clip(dev);
4034
4035 if (ret < 0)
4036 break;
4037 }
4038 rcu_read_unlock();
4039}
4040
b8ff05a9
DM
4041/**
4042 * cxgb_up - enable the adapter
4043 * @adap: adapter being enabled
4044 *
4045 * Called when the first port is enabled, this function performs the
4046 * actions necessary to make an adapter operational, such as completing
4047 * the initialization of HW modules, and enabling interrupts.
4048 *
4049 * Must be called with the rtnl lock held.
4050 */
4051static int cxgb_up(struct adapter *adap)
4052{
aaefae9b 4053 int err;
b8ff05a9 4054
aaefae9b
DM
4055 err = setup_sge_queues(adap);
4056 if (err)
4057 goto out;
4058 err = setup_rss(adap);
4059 if (err)
4060 goto freeq;
b8ff05a9
DM
4061
4062 if (adap->flags & USING_MSIX) {
aaefae9b 4063 name_msix_vecs(adap);
b8ff05a9
DM
4064 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4065 adap->msix_info[0].desc, adap);
4066 if (err)
4067 goto irq_err;
4068
4069 err = request_msix_queue_irqs(adap);
4070 if (err) {
4071 free_irq(adap->msix_info[0].vec, adap);
4072 goto irq_err;
4073 }
4074 } else {
4075 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4076 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
b1a3c2b6 4077 adap->port[0]->name, adap);
b8ff05a9
DM
4078 if (err)
4079 goto irq_err;
4080 }
4081 enable_rx(adap);
4082 t4_sge_start(adap);
4083 t4_intr_enable(adap);
aaefae9b 4084 adap->flags |= FULL_INIT_DONE;
b8ff05a9 4085 notify_ulds(adap, CXGB4_STATE_UP);
01bcca68 4086 update_clip(adap);
b8ff05a9
DM
4087 out:
4088 return err;
4089 irq_err:
4090 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
aaefae9b
DM
4091 freeq:
4092 t4_free_sge_resources(adap);
b8ff05a9
DM
4093 goto out;
4094}
4095
4096static void cxgb_down(struct adapter *adapter)
4097{
4098 t4_intr_disable(adapter);
4099 cancel_work_sync(&adapter->tid_release_task);
881806bc
VP
4100 cancel_work_sync(&adapter->db_full_task);
4101 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 4102 adapter->tid_release_task_busy = false;
204dc3c0 4103 adapter->tid_release_head = NULL;
b8ff05a9
DM
4104
4105 if (adapter->flags & USING_MSIX) {
4106 free_msix_queue_irqs(adapter);
4107 free_irq(adapter->msix_info[0].vec, adapter);
4108 } else
4109 free_irq(adapter->pdev->irq, adapter);
4110 quiesce_rx(adapter);
aaefae9b
DM
4111 t4_sge_stop(adapter);
4112 t4_free_sge_resources(adapter);
4113 adapter->flags &= ~FULL_INIT_DONE;
b8ff05a9
DM
4114}
4115
4116/*
4117 * net_device operations
4118 */
4119static int cxgb_open(struct net_device *dev)
4120{
4121 int err;
4122 struct port_info *pi = netdev_priv(dev);
4123 struct adapter *adapter = pi->adapter;
4124
6a3c869a
DM
4125 netif_carrier_off(dev);
4126
aaefae9b
DM
4127 if (!(adapter->flags & FULL_INIT_DONE)) {
4128 err = cxgb_up(adapter);
4129 if (err < 0)
4130 return err;
4131 }
b8ff05a9 4132
f68707b8
DM
4133 err = link_start(dev);
4134 if (!err)
4135 netif_tx_start_all_queues(dev);
4136 return err;
b8ff05a9
DM
4137}
4138
4139static int cxgb_close(struct net_device *dev)
4140{
b8ff05a9
DM
4141 struct port_info *pi = netdev_priv(dev);
4142 struct adapter *adapter = pi->adapter;
4143
4144 netif_tx_stop_all_queues(dev);
4145 netif_carrier_off(dev);
060e0c75 4146 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
b8ff05a9
DM
4147}
4148
f2b7e78d
VP
4149/* Return an error number if the indicated filter isn't writable ...
4150 */
4151static int writable_filter(struct filter_entry *f)
4152{
4153 if (f->locked)
4154 return -EPERM;
4155 if (f->pending)
4156 return -EBUSY;
4157
4158 return 0;
4159}
4160
4161/* Delete the filter at the specified index (if valid). The checks for all
4162 * the common problems with doing this like the filter being locked, currently
4163 * pending in another operation, etc.
4164 */
4165static int delete_filter(struct adapter *adapter, unsigned int fidx)
4166{
4167 struct filter_entry *f;
4168 int ret;
4169
dca4faeb 4170 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
f2b7e78d
VP
4171 return -EINVAL;
4172
4173 f = &adapter->tids.ftid_tab[fidx];
4174 ret = writable_filter(f);
4175 if (ret)
4176 return ret;
4177 if (f->valid)
4178 return del_filter_wr(adapter, fidx);
4179
4180 return 0;
4181}
4182
dca4faeb 4183int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
793dad94
VP
4184 __be32 sip, __be16 sport, __be16 vlan,
4185 unsigned int queue, unsigned char port, unsigned char mask)
dca4faeb
VP
4186{
4187 int ret;
4188 struct filter_entry *f;
4189 struct adapter *adap;
4190 int i;
4191 u8 *val;
4192
4193 adap = netdev2adap(dev);
4194
1cab775c 4195 /* Adjust stid to correct filter index */
470c60c4 4196 stid -= adap->tids.sftid_base;
1cab775c
VP
4197 stid += adap->tids.nftids;
4198
dca4faeb
VP
4199 /* Check to make sure the filter requested is writable ...
4200 */
4201 f = &adap->tids.ftid_tab[stid];
4202 ret = writable_filter(f);
4203 if (ret)
4204 return ret;
4205
4206 /* Clear out any old resources being used by the filter before
4207 * we start constructing the new filter.
4208 */
4209 if (f->valid)
4210 clear_filter(adap, f);
4211
4212 /* Clear out filter specifications */
4213 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4214 f->fs.val.lport = cpu_to_be16(sport);
4215 f->fs.mask.lport = ~0;
4216 val = (u8 *)&sip;
793dad94 4217 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
dca4faeb
VP
4218 for (i = 0; i < 4; i++) {
4219 f->fs.val.lip[i] = val[i];
4220 f->fs.mask.lip[i] = ~0;
4221 }
793dad94
VP
4222 if (adap->filter_mode & F_PORT) {
4223 f->fs.val.iport = port;
4224 f->fs.mask.iport = mask;
4225 }
4226 }
dca4faeb 4227
7c89e555
KS
4228 if (adap->filter_mode & F_PROTOCOL) {
4229 f->fs.val.proto = IPPROTO_TCP;
4230 f->fs.mask.proto = ~0;
4231 }
4232
dca4faeb
VP
4233 f->fs.dirsteer = 1;
4234 f->fs.iq = queue;
4235 /* Mark filter as locked */
4236 f->locked = 1;
4237 f->fs.rpttid = 1;
4238
4239 ret = set_filter_wr(adap, stid);
4240 if (ret) {
4241 clear_filter(adap, f);
4242 return ret;
4243 }
4244
4245 return 0;
4246}
4247EXPORT_SYMBOL(cxgb4_create_server_filter);
4248
4249int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4250 unsigned int queue, bool ipv6)
4251{
4252 int ret;
4253 struct filter_entry *f;
4254 struct adapter *adap;
4255
4256 adap = netdev2adap(dev);
1cab775c
VP
4257
4258 /* Adjust stid to correct filter index */
470c60c4 4259 stid -= adap->tids.sftid_base;
1cab775c
VP
4260 stid += adap->tids.nftids;
4261
dca4faeb
VP
4262 f = &adap->tids.ftid_tab[stid];
4263 /* Unlock the filter */
4264 f->locked = 0;
4265
4266 ret = delete_filter(adap, stid);
4267 if (ret)
4268 return ret;
4269
4270 return 0;
4271}
4272EXPORT_SYMBOL(cxgb4_remove_server_filter);
4273
f5152c90
DM
4274static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4275 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
4276{
4277 struct port_stats stats;
4278 struct port_info *p = netdev_priv(dev);
4279 struct adapter *adapter = p->adapter;
b8ff05a9
DM
4280
4281 spin_lock(&adapter->stats_lock);
4282 t4_get_port_stats(adapter, p->tx_chan, &stats);
4283 spin_unlock(&adapter->stats_lock);
4284
4285 ns->tx_bytes = stats.tx_octets;
4286 ns->tx_packets = stats.tx_frames;
4287 ns->rx_bytes = stats.rx_octets;
4288 ns->rx_packets = stats.rx_frames;
4289 ns->multicast = stats.rx_mcast_frames;
4290
4291 /* detailed rx_errors */
4292 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4293 stats.rx_runt;
4294 ns->rx_over_errors = 0;
4295 ns->rx_crc_errors = stats.rx_fcs_err;
4296 ns->rx_frame_errors = stats.rx_symbol_err;
4297 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4298 stats.rx_ovflow2 + stats.rx_ovflow3 +
4299 stats.rx_trunc0 + stats.rx_trunc1 +
4300 stats.rx_trunc2 + stats.rx_trunc3;
4301 ns->rx_missed_errors = 0;
4302
4303 /* detailed tx_errors */
4304 ns->tx_aborted_errors = 0;
4305 ns->tx_carrier_errors = 0;
4306 ns->tx_fifo_errors = 0;
4307 ns->tx_heartbeat_errors = 0;
4308 ns->tx_window_errors = 0;
4309
4310 ns->tx_errors = stats.tx_error_frames;
4311 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4312 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4313 return ns;
4314}
4315
4316static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4317{
060e0c75 4318 unsigned int mbox;
b8ff05a9
DM
4319 int ret = 0, prtad, devad;
4320 struct port_info *pi = netdev_priv(dev);
4321 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4322
4323 switch (cmd) {
4324 case SIOCGMIIPHY:
4325 if (pi->mdio_addr < 0)
4326 return -EOPNOTSUPP;
4327 data->phy_id = pi->mdio_addr;
4328 break;
4329 case SIOCGMIIREG:
4330 case SIOCSMIIREG:
4331 if (mdio_phy_id_is_c45(data->phy_id)) {
4332 prtad = mdio_phy_id_prtad(data->phy_id);
4333 devad = mdio_phy_id_devad(data->phy_id);
4334 } else if (data->phy_id < 32) {
4335 prtad = data->phy_id;
4336 devad = 0;
4337 data->reg_num &= 0x1f;
4338 } else
4339 return -EINVAL;
4340
060e0c75 4341 mbox = pi->adapter->fn;
b8ff05a9 4342 if (cmd == SIOCGMIIREG)
060e0c75 4343 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
4344 data->reg_num, &data->val_out);
4345 else
060e0c75 4346 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
4347 data->reg_num, data->val_in);
4348 break;
4349 default:
4350 return -EOPNOTSUPP;
4351 }
4352 return ret;
4353}
4354
4355static void cxgb_set_rxmode(struct net_device *dev)
4356{
4357 /* unfortunately we can't return errors to the stack */
4358 set_rxmode(dev, -1, false);
4359}
4360
4361static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4362{
4363 int ret;
4364 struct port_info *pi = netdev_priv(dev);
4365
4366 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4367 return -EINVAL;
060e0c75
DM
4368 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4369 -1, -1, -1, true);
b8ff05a9
DM
4370 if (!ret)
4371 dev->mtu = new_mtu;
4372 return ret;
4373}
4374
4375static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4376{
4377 int ret;
4378 struct sockaddr *addr = p;
4379 struct port_info *pi = netdev_priv(dev);
4380
4381 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 4382 return -EADDRNOTAVAIL;
b8ff05a9 4383
060e0c75
DM
4384 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4385 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
4386 if (ret < 0)
4387 return ret;
4388
4389 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4390 pi->xact_addr_filt = ret;
4391 return 0;
4392}
4393
b8ff05a9
DM
4394#ifdef CONFIG_NET_POLL_CONTROLLER
4395static void cxgb_netpoll(struct net_device *dev)
4396{
4397 struct port_info *pi = netdev_priv(dev);
4398 struct adapter *adap = pi->adapter;
4399
4400 if (adap->flags & USING_MSIX) {
4401 int i;
4402 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4403
4404 for (i = pi->nqsets; i; i--, rx++)
4405 t4_sge_intr_msix(0, &rx->rspq);
4406 } else
4407 t4_intr_handler(adap)(0, adap);
4408}
4409#endif
4410
4411static const struct net_device_ops cxgb4_netdev_ops = {
4412 .ndo_open = cxgb_open,
4413 .ndo_stop = cxgb_close,
4414 .ndo_start_xmit = t4_eth_xmit,
9be793bf 4415 .ndo_get_stats64 = cxgb_get_stats,
b8ff05a9
DM
4416 .ndo_set_rx_mode = cxgb_set_rxmode,
4417 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 4418 .ndo_set_features = cxgb_set_features,
b8ff05a9
DM
4419 .ndo_validate_addr = eth_validate_addr,
4420 .ndo_do_ioctl = cxgb_ioctl,
4421 .ndo_change_mtu = cxgb_change_mtu,
b8ff05a9
DM
4422#ifdef CONFIG_NET_POLL_CONTROLLER
4423 .ndo_poll_controller = cxgb_netpoll,
4424#endif
4425};
4426
4427void t4_fatal_err(struct adapter *adap)
4428{
4429 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4430 t4_intr_disable(adap);
4431 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4432}
4433
4434static void setup_memwin(struct adapter *adap)
4435{
19dd37ba 4436 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
b8ff05a9
DM
4437
4438 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
d14807dd 4439 if (is_t4(adap->params.chip)) {
19dd37ba
SR
4440 mem_win0_base = bar0 + MEMWIN0_BASE;
4441 mem_win1_base = bar0 + MEMWIN1_BASE;
4442 mem_win2_base = bar0 + MEMWIN2_BASE;
4443 } else {
4444 /* For T5, only relative offset inside the PCIe BAR is passed */
4445 mem_win0_base = MEMWIN0_BASE;
4446 mem_win1_base = MEMWIN1_BASE_T5;
4447 mem_win2_base = MEMWIN2_BASE_T5;
4448 }
b8ff05a9 4449 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
19dd37ba 4450 mem_win0_base | BIR(0) |
b8ff05a9
DM
4451 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4452 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
19dd37ba 4453 mem_win1_base | BIR(0) |
b8ff05a9
DM
4454 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4455 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
19dd37ba 4456 mem_win2_base | BIR(0) |
b8ff05a9 4457 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
636f9d37
VP
4458}
4459
4460static void setup_memwin_rdma(struct adapter *adap)
4461{
1ae970e0
DM
4462 if (adap->vres.ocq.size) {
4463 unsigned int start, sz_kb;
4464
4465 start = pci_resource_start(adap->pdev, 2) +
4466 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4467 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4468 t4_write_reg(adap,
4469 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4470 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4471 t4_write_reg(adap,
4472 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4473 adap->vres.ocq.start);
4474 t4_read_reg(adap,
4475 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4476 }
b8ff05a9
DM
4477}
4478
02b5fb8e
DM
4479static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4480{
4481 u32 v;
4482 int ret;
4483
4484 /* get device capabilities */
4485 memset(c, 0, sizeof(*c));
4486 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4487 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 4488 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
060e0c75 4489 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
02b5fb8e
DM
4490 if (ret < 0)
4491 return ret;
4492
4493 /* select capabilities we'll be using */
4494 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4495 if (!vf_acls)
4496 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4497 else
4498 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4499 } else if (vf_acls) {
4500 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4501 return ret;
4502 }
4503 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4504 FW_CMD_REQUEST | FW_CMD_WRITE);
060e0c75 4505 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
02b5fb8e
DM
4506 if (ret < 0)
4507 return ret;
4508
060e0c75 4509 ret = t4_config_glbl_rss(adap, adap->fn,
02b5fb8e
DM
4510 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4511 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4512 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4513 if (ret < 0)
4514 return ret;
4515
060e0c75
DM
4516 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4517 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
02b5fb8e
DM
4518 if (ret < 0)
4519 return ret;
4520
4521 t4_sge_init(adap);
4522
02b5fb8e
DM
4523 /* tweak some settings */
4524 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4525 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4526 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4527 v = t4_read_reg(adap, TP_PIO_DATA);
4528 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
060e0c75 4529
dca4faeb
VP
4530 /* first 4 Tx modulation queues point to consecutive Tx channels */
4531 adap->params.tp.tx_modq_map = 0xE4;
4532 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4533 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4534
4535 /* associate each Tx modulation queue with consecutive Tx channels */
4536 v = 0x84218421;
4537 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4538 &v, 1, A_TP_TX_SCHED_HDR);
4539 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4540 &v, 1, A_TP_TX_SCHED_FIFO);
4541 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4542 &v, 1, A_TP_TX_SCHED_PCMD);
4543
4544#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4545 if (is_offload(adap)) {
4546 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4547 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4548 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4549 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4550 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4551 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4552 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4553 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4554 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4555 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4556 }
4557
060e0c75
DM
4558 /* get basic stuff going */
4559 return t4_early_init(adap, adap->fn);
02b5fb8e
DM
4560}
4561
b8ff05a9
DM
4562/*
4563 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4564 */
4565#define MAX_ATIDS 8192U
4566
636f9d37
VP
4567/*
4568 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4569 *
4570 * If the firmware we're dealing with has Configuration File support, then
4571 * we use that to perform all configuration
4572 */
4573
4574/*
4575 * Tweak configuration based on module parameters, etc. Most of these have
4576 * defaults assigned to them by Firmware Configuration Files (if we're using
4577 * them) but need to be explicitly set if we're using hard-coded
4578 * initialization. But even in the case of using Firmware Configuration
4579 * Files, we'd like to expose the ability to change these via module
4580 * parameters so these are essentially common tweaks/settings for
4581 * Configuration Files and hard-coded initialization ...
4582 */
4583static int adap_init0_tweaks(struct adapter *adapter)
4584{
4585 /*
4586 * Fix up various Host-Dependent Parameters like Page Size, Cache
4587 * Line Size, etc. The firmware default is for a 4KB Page Size and
4588 * 64B Cache Line Size ...
4589 */
4590 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4591
4592 /*
4593 * Process module parameters which affect early initialization.
4594 */
4595 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4596 dev_err(&adapter->pdev->dev,
4597 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4598 rx_dma_offset);
4599 rx_dma_offset = 2;
4600 }
4601 t4_set_reg_field(adapter, SGE_CONTROL,
4602 PKTSHIFT_MASK,
4603 PKTSHIFT(rx_dma_offset));
4604
4605 /*
4606 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4607 * adds the pseudo header itself.
4608 */
4609 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4610 CSUM_HAS_PSEUDO_HDR, 0);
4611
4612 return 0;
4613}
4614
4615/*
4616 * Attempt to initialize the adapter via a Firmware Configuration File.
4617 */
4618static int adap_init0_config(struct adapter *adapter, int reset)
4619{
4620 struct fw_caps_config_cmd caps_cmd;
4621 const struct firmware *cf;
4622 unsigned long mtype = 0, maddr = 0;
4623 u32 finiver, finicsum, cfcsum;
16e47624
HS
4624 int ret;
4625 int config_issued = 0;
0a57a536 4626 char *fw_config_file, fw_config_file_path[256];
16e47624 4627 char *config_name = NULL;
636f9d37
VP
4628
4629 /*
4630 * Reset device if necessary.
4631 */
4632 if (reset) {
4633 ret = t4_fw_reset(adapter, adapter->mbox,
4634 PIORSTMODE | PIORST);
4635 if (ret < 0)
4636 goto bye;
4637 }
4638
4639 /*
4640 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4641 * then use that. Otherwise, use the configuration file stored
4642 * in the adapter flash ...
4643 */
d14807dd 4644 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
0a57a536 4645 case CHELSIO_T4:
16e47624 4646 fw_config_file = FW4_CFNAME;
0a57a536
SR
4647 break;
4648 case CHELSIO_T5:
4649 fw_config_file = FW5_CFNAME;
4650 break;
4651 default:
4652 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4653 adapter->pdev->device);
4654 ret = -EINVAL;
4655 goto bye;
4656 }
4657
4658 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
636f9d37 4659 if (ret < 0) {
16e47624 4660 config_name = "On FLASH";
636f9d37
VP
4661 mtype = FW_MEMTYPE_CF_FLASH;
4662 maddr = t4_flash_cfg_addr(adapter);
4663 } else {
4664 u32 params[7], val[7];
4665
16e47624
HS
4666 sprintf(fw_config_file_path,
4667 "/lib/firmware/%s", fw_config_file);
4668 config_name = fw_config_file_path;
4669
636f9d37
VP
4670 if (cf->size >= FLASH_CFG_MAX_SIZE)
4671 ret = -ENOMEM;
4672 else {
4673 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4674 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4675 ret = t4_query_params(adapter, adapter->mbox,
4676 adapter->fn, 0, 1, params, val);
4677 if (ret == 0) {
4678 /*
4679 * For t4_memory_write() below addresses and
4680 * sizes have to be in terms of multiples of 4
4681 * bytes. So, if the Configuration File isn't
4682 * a multiple of 4 bytes in length we'll have
4683 * to write that out separately since we can't
4684 * guarantee that the bytes following the
4685 * residual byte in the buffer returned by
4686 * request_firmware() are zeroed out ...
4687 */
4688 size_t resid = cf->size & 0x3;
4689 size_t size = cf->size & ~0x3;
4690 __be32 *data = (__be32 *)cf->data;
4691
4692 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4693 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4694
4695 ret = t4_memory_write(adapter, mtype, maddr,
4696 size, data);
4697 if (ret == 0 && resid != 0) {
4698 union {
4699 __be32 word;
4700 char buf[4];
4701 } last;
4702 int i;
4703
4704 last.word = data[size >> 2];
4705 for (i = resid; i < 4; i++)
4706 last.buf[i] = 0;
4707 ret = t4_memory_write(adapter, mtype,
4708 maddr + size,
4709 4, &last.word);
4710 }
4711 }
4712 }
4713
4714 release_firmware(cf);
4715 if (ret)
4716 goto bye;
4717 }
4718
4719 /*
4720 * Issue a Capability Configuration command to the firmware to get it
4721 * to parse the Configuration File. We don't use t4_fw_config_file()
4722 * because we want the ability to modify various features after we've
4723 * processed the configuration file ...
4724 */
4725 memset(&caps_cmd, 0, sizeof(caps_cmd));
4726 caps_cmd.op_to_write =
4727 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4728 FW_CMD_REQUEST |
4729 FW_CMD_READ);
ce91a923 4730 caps_cmd.cfvalid_to_len16 =
636f9d37
VP
4731 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4732 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4733 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4734 FW_LEN16(caps_cmd));
4735 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4736 &caps_cmd);
16e47624
HS
4737
4738 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4739 * Configuration File in FLASH), our last gasp effort is to use the
4740 * Firmware Configuration File which is embedded in the firmware. A
4741 * very few early versions of the firmware didn't have one embedded
4742 * but we can ignore those.
4743 */
4744 if (ret == -ENOENT) {
4745 memset(&caps_cmd, 0, sizeof(caps_cmd));
4746 caps_cmd.op_to_write =
4747 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4748 FW_CMD_REQUEST |
4749 FW_CMD_READ);
4750 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4751 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4752 sizeof(caps_cmd), &caps_cmd);
4753 config_name = "Firmware Default";
4754 }
4755
4756 config_issued = 1;
636f9d37
VP
4757 if (ret < 0)
4758 goto bye;
4759
4760 finiver = ntohl(caps_cmd.finiver);
4761 finicsum = ntohl(caps_cmd.finicsum);
4762 cfcsum = ntohl(caps_cmd.cfcsum);
4763 if (finicsum != cfcsum)
4764 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4765 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4766 finicsum, cfcsum);
4767
636f9d37
VP
4768 /*
4769 * And now tell the firmware to use the configuration we just loaded.
4770 */
4771 caps_cmd.op_to_write =
4772 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4773 FW_CMD_REQUEST |
4774 FW_CMD_WRITE);
ce91a923 4775 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
4776 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4777 NULL);
4778 if (ret < 0)
4779 goto bye;
4780
4781 /*
4782 * Tweak configuration based on system architecture, module
4783 * parameters, etc.
4784 */
4785 ret = adap_init0_tweaks(adapter);
4786 if (ret < 0)
4787 goto bye;
4788
4789 /*
4790 * And finally tell the firmware to initialize itself using the
4791 * parameters from the Configuration File.
4792 */
4793 ret = t4_fw_initialize(adapter, adapter->mbox);
4794 if (ret < 0)
4795 goto bye;
4796
4797 /*
4798 * Return successfully and note that we're operating with parameters
4799 * not supplied by the driver, rather than from hard-wired
4800 * initialization constants burried in the driver.
4801 */
4802 adapter->flags |= USING_SOFT_PARAMS;
4803 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
16e47624
HS
4804 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4805 config_name, finiver, cfcsum);
636f9d37
VP
4806 return 0;
4807
4808 /*
4809 * Something bad happened. Return the error ... (If the "error"
4810 * is that there's no Configuration File on the adapter we don't
4811 * want to issue a warning since this is fairly common.)
4812 */
4813bye:
16e47624
HS
4814 if (config_issued && ret != -ENOENT)
4815 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4816 config_name, -ret);
636f9d37
VP
4817 return ret;
4818}
4819
13ee15d3
VP
4820/*
4821 * Attempt to initialize the adapter via hard-coded, driver supplied
4822 * parameters ...
4823 */
4824static int adap_init0_no_config(struct adapter *adapter, int reset)
4825{
4826 struct sge *s = &adapter->sge;
4827 struct fw_caps_config_cmd caps_cmd;
4828 u32 v;
4829 int i, ret;
4830
4831 /*
4832 * Reset device if necessary
4833 */
4834 if (reset) {
4835 ret = t4_fw_reset(adapter, adapter->mbox,
4836 PIORSTMODE | PIORST);
4837 if (ret < 0)
4838 goto bye;
4839 }
4840
4841 /*
4842 * Get device capabilities and select which we'll be using.
4843 */
4844 memset(&caps_cmd, 0, sizeof(caps_cmd));
4845 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4846 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 4847 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
13ee15d3
VP
4848 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4849 &caps_cmd);
4850 if (ret < 0)
4851 goto bye;
4852
13ee15d3
VP
4853 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4854 if (!vf_acls)
4855 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4856 else
4857 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4858 } else if (vf_acls) {
4859 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4860 goto bye;
4861 }
4862 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4863 FW_CMD_REQUEST | FW_CMD_WRITE);
4864 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4865 NULL);
4866 if (ret < 0)
4867 goto bye;
4868
4869 /*
4870 * Tweak configuration based on system architecture, module
4871 * parameters, etc.
4872 */
4873 ret = adap_init0_tweaks(adapter);
4874 if (ret < 0)
4875 goto bye;
4876
4877 /*
4878 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4879 * mode which maps each Virtual Interface to its own section of
4880 * the RSS Table and we turn on all map and hash enables ...
4881 */
4882 adapter->flags |= RSS_TNLALLLOOKUP;
4883 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4884 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4885 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4886 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4887 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4888 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4889 if (ret < 0)
4890 goto bye;
4891
4892 /*
4893 * Set up our own fundamental resource provisioning ...
4894 */
4895 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4896 PFRES_NEQ, PFRES_NETHCTRL,
4897 PFRES_NIQFLINT, PFRES_NIQ,
4898 PFRES_TC, PFRES_NVI,
4899 FW_PFVF_CMD_CMASK_MASK,
4900 pfvfres_pmask(adapter, adapter->fn, 0),
4901 PFRES_NEXACTF,
4902 PFRES_R_CAPS, PFRES_WX_CAPS);
4903 if (ret < 0)
4904 goto bye;
4905
4906 /*
4907 * Perform low level SGE initialization. We need to do this before we
4908 * send the firmware the INITIALIZE command because that will cause
4909 * any other PF Drivers which are waiting for the Master
4910 * Initialization to proceed forward.
4911 */
4912 for (i = 0; i < SGE_NTIMERS - 1; i++)
4913 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4914 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4915 s->counter_val[0] = 1;
4916 for (i = 1; i < SGE_NCOUNTERS; i++)
4917 s->counter_val[i] = min(intr_cnt[i - 1],
4918 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4919 t4_sge_init(adapter);
4920
4921#ifdef CONFIG_PCI_IOV
4922 /*
4923 * Provision resource limits for Virtual Functions. We currently
4924 * grant them all the same static resource limits except for the Port
4925 * Access Rights Mask which we're assigning based on the PF. All of
4926 * the static provisioning stuff for both the PF and VF really needs
4927 * to be managed in a persistent manner for each device which the
4928 * firmware controls.
4929 */
4930 {
4931 int pf, vf;
4932
7d6727cf 4933 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
13ee15d3
VP
4934 if (num_vf[pf] <= 0)
4935 continue;
4936
4937 /* VF numbering starts at 1! */
4938 for (vf = 1; vf <= num_vf[pf]; vf++) {
4939 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4940 pf, vf,
4941 VFRES_NEQ, VFRES_NETHCTRL,
4942 VFRES_NIQFLINT, VFRES_NIQ,
4943 VFRES_TC, VFRES_NVI,
1f1e4958 4944 FW_PFVF_CMD_CMASK_MASK,
13ee15d3
VP
4945 pfvfres_pmask(
4946 adapter, pf, vf),
4947 VFRES_NEXACTF,
4948 VFRES_R_CAPS, VFRES_WX_CAPS);
4949 if (ret < 0)
4950 dev_warn(adapter->pdev_dev,
4951 "failed to "\
4952 "provision pf/vf=%d/%d; "
4953 "err=%d\n", pf, vf, ret);
4954 }
4955 }
4956 }
4957#endif
4958
4959 /*
4960 * Set up the default filter mode. Later we'll want to implement this
4961 * via a firmware command, etc. ... This needs to be done before the
4962 * firmare initialization command ... If the selected set of fields
4963 * isn't equal to the default value, we'll need to make sure that the
4964 * field selections will fit in the 36-bit budget.
4965 */
4966 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
404d9e3f 4967 int j, bits = 0;
13ee15d3 4968
404d9e3f
VP
4969 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4970 switch (tp_vlan_pri_map & (1 << j)) {
13ee15d3
VP
4971 case 0:
4972 /* compressed filter field not enabled */
4973 break;
4974 case FCOE_MASK:
4975 bits += 1;
4976 break;
4977 case PORT_MASK:
4978 bits += 3;
4979 break;
4980 case VNIC_ID_MASK:
4981 bits += 17;
4982 break;
4983 case VLAN_MASK:
4984 bits += 17;
4985 break;
4986 case TOS_MASK:
4987 bits += 8;
4988 break;
4989 case PROTOCOL_MASK:
4990 bits += 8;
4991 break;
4992 case ETHERTYPE_MASK:
4993 bits += 16;
4994 break;
4995 case MACMATCH_MASK:
4996 bits += 9;
4997 break;
4998 case MPSHITTYPE_MASK:
4999 bits += 3;
5000 break;
5001 case FRAGMENTATION_MASK:
5002 bits += 1;
5003 break;
5004 }
5005
5006 if (bits > 36) {
5007 dev_err(adapter->pdev_dev,
5008 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5009 " using %#x\n", tp_vlan_pri_map, bits,
5010 TP_VLAN_PRI_MAP_DEFAULT);
5011 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5012 }
5013 }
5014 v = tp_vlan_pri_map;
5015 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5016 &v, 1, TP_VLAN_PRI_MAP);
5017
5018 /*
5019 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
5020 * to support any of the compressed filter fields above. Newer
5021 * versions of the firmware do this automatically but it doesn't hurt
5022 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5023 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5024 * since the firmware automatically turns this on and off when we have
5025 * a non-zero number of filters active (since it does have a
5026 * performance impact).
5027 */
5028 if (tp_vlan_pri_map)
5029 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5030 FIVETUPLELOOKUP_MASK,
5031 FIVETUPLELOOKUP_MASK);
5032
5033 /*
5034 * Tweak some settings.
5035 */
5036 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5037 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5038 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5039 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5040
5041 /*
5042 * Get basic stuff going by issuing the Firmware Initialize command.
5043 * Note that this _must_ be after all PFVF commands ...
5044 */
5045 ret = t4_fw_initialize(adapter, adapter->mbox);
5046 if (ret < 0)
5047 goto bye;
5048
5049 /*
5050 * Return successfully!
5051 */
5052 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5053 "driver parameters\n");
5054 return 0;
5055
5056 /*
5057 * Something bad happened. Return the error ...
5058 */
5059bye:
5060 return ret;
5061}
5062
16e47624
HS
5063static struct fw_info fw_info_array[] = {
5064 {
5065 .chip = CHELSIO_T4,
5066 .fs_name = FW4_CFNAME,
5067 .fw_mod_name = FW4_FNAME,
5068 .fw_hdr = {
5069 .chip = FW_HDR_CHIP_T4,
5070 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5071 .intfver_nic = FW_INTFVER(T4, NIC),
5072 .intfver_vnic = FW_INTFVER(T4, VNIC),
5073 .intfver_ri = FW_INTFVER(T4, RI),
5074 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5075 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5076 },
5077 }, {
5078 .chip = CHELSIO_T5,
5079 .fs_name = FW5_CFNAME,
5080 .fw_mod_name = FW5_FNAME,
5081 .fw_hdr = {
5082 .chip = FW_HDR_CHIP_T5,
5083 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5084 .intfver_nic = FW_INTFVER(T5, NIC),
5085 .intfver_vnic = FW_INTFVER(T5, VNIC),
5086 .intfver_ri = FW_INTFVER(T5, RI),
5087 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5088 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5089 },
5090 }
5091};
5092
5093static struct fw_info *find_fw_info(int chip)
5094{
5095 int i;
5096
5097 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5098 if (fw_info_array[i].chip == chip)
5099 return &fw_info_array[i];
5100 }
5101 return NULL;
5102}
5103
b8ff05a9
DM
5104/*
5105 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5106 */
5107static int adap_init0(struct adapter *adap)
5108{
5109 int ret;
5110 u32 v, port_vec;
5111 enum dev_state state;
5112 u32 params[7], val[7];
9a4da2cd 5113 struct fw_caps_config_cmd caps_cmd;
636f9d37 5114 int reset = 1, j;
b8ff05a9 5115
636f9d37
VP
5116 /*
5117 * Contact FW, advertising Master capability (and potentially forcing
5118 * ourselves as the Master PF if our module parameter force_init is
5119 * set).
5120 */
5121 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5122 force_init ? MASTER_MUST : MASTER_MAY,
5123 &state);
b8ff05a9
DM
5124 if (ret < 0) {
5125 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5126 ret);
5127 return ret;
5128 }
636f9d37
VP
5129 if (ret == adap->mbox)
5130 adap->flags |= MASTER_PF;
5131 if (force_init && state == DEV_STATE_INIT)
5132 state = DEV_STATE_UNINIT;
b8ff05a9 5133
636f9d37
VP
5134 /*
5135 * If we're the Master PF Driver and the device is uninitialized,
5136 * then let's consider upgrading the firmware ... (We always want
5137 * to check the firmware version number in order to A. get it for
5138 * later reporting and B. to warn if the currently loaded firmware
5139 * is excessively mismatched relative to the driver.)
5140 */
16e47624
HS
5141 t4_get_fw_version(adap, &adap->params.fw_vers);
5142 t4_get_tp_version(adap, &adap->params.tp_vers);
636f9d37 5143 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
16e47624
HS
5144 struct fw_info *fw_info;
5145 struct fw_hdr *card_fw;
5146 const struct firmware *fw;
5147 const u8 *fw_data = NULL;
5148 unsigned int fw_size = 0;
5149
5150 /* This is the firmware whose headers the driver was compiled
5151 * against
5152 */
5153 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5154 if (fw_info == NULL) {
5155 dev_err(adap->pdev_dev,
5156 "unable to get firmware info for chip %d.\n",
5157 CHELSIO_CHIP_VERSION(adap->params.chip));
5158 return -EINVAL;
636f9d37 5159 }
16e47624
HS
5160
5161 /* allocate memory to read the header of the firmware on the
5162 * card
5163 */
5164 card_fw = t4_alloc_mem(sizeof(*card_fw));
5165
5166 /* Get FW from from /lib/firmware/ */
5167 ret = request_firmware(&fw, fw_info->fw_mod_name,
5168 adap->pdev_dev);
5169 if (ret < 0) {
5170 dev_err(adap->pdev_dev,
5171 "unable to load firmware image %s, error %d\n",
5172 fw_info->fw_mod_name, ret);
5173 } else {
5174 fw_data = fw->data;
5175 fw_size = fw->size;
5176 }
5177
5178 /* upgrade FW logic */
5179 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5180 state, &reset);
5181
5182 /* Cleaning up */
5183 if (fw != NULL)
5184 release_firmware(fw);
5185 t4_free_mem(card_fw);
5186
636f9d37 5187 if (ret < 0)
16e47624 5188 goto bye;
636f9d37 5189 }
b8ff05a9 5190
636f9d37
VP
5191 /*
5192 * Grab VPD parameters. This should be done after we establish a
5193 * connection to the firmware since some of the VPD parameters
5194 * (notably the Core Clock frequency) are retrieved via requests to
5195 * the firmware. On the other hand, we need these fairly early on
5196 * so we do this right after getting ahold of the firmware.
5197 */
5198 ret = get_vpd_params(adap, &adap->params.vpd);
a0881cab
DM
5199 if (ret < 0)
5200 goto bye;
a0881cab 5201
636f9d37 5202 /*
13ee15d3
VP
5203 * Find out what ports are available to us. Note that we need to do
5204 * this before calling adap_init0_no_config() since it needs nports
5205 * and portvec ...
636f9d37
VP
5206 */
5207 v =
5208 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5209 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5210 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
a0881cab
DM
5211 if (ret < 0)
5212 goto bye;
5213
636f9d37
VP
5214 adap->params.nports = hweight32(port_vec);
5215 adap->params.portvec = port_vec;
5216
5217 /*
5218 * If the firmware is initialized already (and we're not forcing a
5219 * master initialization), note that we're living with existing
5220 * adapter parameters. Otherwise, it's time to try initializing the
5221 * adapter ...
5222 */
5223 if (state == DEV_STATE_INIT) {
5224 dev_info(adap->pdev_dev, "Coming up as %s: "\
5225 "Adapter already initialized\n",
5226 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5227 adap->flags |= USING_SOFT_PARAMS;
5228 } else {
5229 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5230 "Initializing adapter\n");
636f9d37
VP
5231
5232 /*
5233 * If the firmware doesn't support Configuration
5234 * Files warn user and exit,
5235 */
5236 if (ret < 0)
13ee15d3 5237 dev_warn(adap->pdev_dev, "Firmware doesn't support "
636f9d37 5238 "configuration file.\n");
13ee15d3
VP
5239 if (force_old_init)
5240 ret = adap_init0_no_config(adap, reset);
636f9d37
VP
5241 else {
5242 /*
13ee15d3
VP
5243 * Find out whether we're dealing with a version of
5244 * the firmware which has configuration file support.
636f9d37 5245 */
13ee15d3
VP
5246 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5247 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5248 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5249 params, val);
636f9d37 5250
13ee15d3
VP
5251 /*
5252 * If the firmware doesn't support Configuration
5253 * Files, use the old Driver-based, hard-wired
5254 * initialization. Otherwise, try using the
5255 * Configuration File support and fall back to the
5256 * Driver-based initialization if there's no
5257 * Configuration File found.
5258 */
5259 if (ret < 0)
5260 ret = adap_init0_no_config(adap, reset);
5261 else {
5262 /*
5263 * The firmware provides us with a memory
5264 * buffer where we can load a Configuration
5265 * File from the host if we want to override
5266 * the Configuration File in flash.
5267 */
5268
5269 ret = adap_init0_config(adap, reset);
5270 if (ret == -ENOENT) {
5271 dev_info(adap->pdev_dev,
5272 "No Configuration File present "
16e47624 5273 "on adapter. Using hard-wired "
13ee15d3
VP
5274 "configuration parameters.\n");
5275 ret = adap_init0_no_config(adap, reset);
5276 }
636f9d37
VP
5277 }
5278 }
5279 if (ret < 0) {
5280 dev_err(adap->pdev_dev,
5281 "could not initialize adapter, error %d\n",
5282 -ret);
5283 goto bye;
5284 }
5285 }
5286
5287 /*
5288 * If we're living with non-hard-coded parameters (either from a
5289 * Firmware Configuration File or values programmed by a different PF
5290 * Driver), give the SGE code a chance to pull in anything that it
5291 * needs ... Note that this must be called after we retrieve our VPD
5292 * parameters in order to know how to convert core ticks to seconds.
5293 */
5294 if (adap->flags & USING_SOFT_PARAMS) {
5295 ret = t4_sge_init(adap);
5296 if (ret < 0)
5297 goto bye;
5298 }
5299
9a4da2cd
VP
5300 if (is_bypass_device(adap->pdev->device))
5301 adap->params.bypass = 1;
5302
636f9d37
VP
5303 /*
5304 * Grab some of our basic fundamental operating parameters.
5305 */
5306#define FW_PARAM_DEV(param) \
5307 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5308 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5309
b8ff05a9 5310#define FW_PARAM_PFVF(param) \
636f9d37
VP
5311 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5312 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5313 FW_PARAMS_PARAM_Y(0) | \
5314 FW_PARAMS_PARAM_Z(0)
b8ff05a9 5315
636f9d37 5316 params[0] = FW_PARAM_PFVF(EQ_START);
b8ff05a9
DM
5317 params[1] = FW_PARAM_PFVF(L2T_START);
5318 params[2] = FW_PARAM_PFVF(L2T_END);
5319 params[3] = FW_PARAM_PFVF(FILTER_START);
5320 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 5321 params[5] = FW_PARAM_PFVF(IQFLINT_START);
636f9d37 5322 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
b8ff05a9
DM
5323 if (ret < 0)
5324 goto bye;
636f9d37
VP
5325 adap->sge.egr_start = val[0];
5326 adap->l2t_start = val[1];
5327 adap->l2t_end = val[2];
b8ff05a9
DM
5328 adap->tids.ftid_base = val[3];
5329 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 5330 adap->sge.ingr_start = val[5];
b8ff05a9 5331
636f9d37
VP
5332 /* query params related to active filter region */
5333 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5334 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5335 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5336 /* If Active filter size is set we enable establishing
5337 * offload connection through firmware work request
5338 */
5339 if ((val[0] != val[1]) && (ret >= 0)) {
5340 adap->flags |= FW_OFLD_CONN;
5341 adap->tids.aftid_base = val[0];
5342 adap->tids.aftid_end = val[1];
5343 }
5344
b407a4a9
VP
5345 /* If we're running on newer firmware, let it know that we're
5346 * prepared to deal with encapsulated CPL messages. Older
5347 * firmware won't understand this and we'll just get
5348 * unencapsulated messages ...
5349 */
5350 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5351 val[0] = 1;
5352 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5353
636f9d37
VP
5354 /*
5355 * Get device capabilities so we can determine what resources we need
5356 * to manage.
5357 */
5358 memset(&caps_cmd, 0, sizeof(caps_cmd));
9a4da2cd 5359 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
13ee15d3 5360 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 5361 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
5362 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5363 &caps_cmd);
5364 if (ret < 0)
5365 goto bye;
5366
13ee15d3 5367 if (caps_cmd.ofldcaps) {
b8ff05a9
DM
5368 /* query offload-related parameters */
5369 params[0] = FW_PARAM_DEV(NTID);
5370 params[1] = FW_PARAM_PFVF(SERVER_START);
5371 params[2] = FW_PARAM_PFVF(SERVER_END);
5372 params[3] = FW_PARAM_PFVF(TDDP_START);
5373 params[4] = FW_PARAM_PFVF(TDDP_END);
5374 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
636f9d37
VP
5375 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5376 params, val);
b8ff05a9
DM
5377 if (ret < 0)
5378 goto bye;
5379 adap->tids.ntids = val[0];
5380 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5381 adap->tids.stid_base = val[1];
5382 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37
VP
5383 /*
5384 * Setup server filter region. Divide the availble filter
5385 * region into two parts. Regular filters get 1/3rd and server
5386 * filters get 2/3rd part. This is only enabled if workarond
5387 * path is enabled.
5388 * 1. For regular filters.
5389 * 2. Server filter: This are special filters which are used
5390 * to redirect SYN packets to offload queue.
5391 */
5392 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5393 adap->tids.sftid_base = adap->tids.ftid_base +
5394 DIV_ROUND_UP(adap->tids.nftids, 3);
5395 adap->tids.nsftids = adap->tids.nftids -
5396 DIV_ROUND_UP(adap->tids.nftids, 3);
5397 adap->tids.nftids = adap->tids.sftid_base -
5398 adap->tids.ftid_base;
5399 }
b8ff05a9
DM
5400 adap->vres.ddp.start = val[3];
5401 adap->vres.ddp.size = val[4] - val[3] + 1;
5402 adap->params.ofldq_wr_cred = val[5];
636f9d37 5403
b8ff05a9
DM
5404 adap->params.offload = 1;
5405 }
636f9d37 5406 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
5407 params[0] = FW_PARAM_PFVF(STAG_START);
5408 params[1] = FW_PARAM_PFVF(STAG_END);
5409 params[2] = FW_PARAM_PFVF(RQ_START);
5410 params[3] = FW_PARAM_PFVF(RQ_END);
5411 params[4] = FW_PARAM_PFVF(PBL_START);
5412 params[5] = FW_PARAM_PFVF(PBL_END);
636f9d37
VP
5413 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5414 params, val);
b8ff05a9
DM
5415 if (ret < 0)
5416 goto bye;
5417 adap->vres.stag.start = val[0];
5418 adap->vres.stag.size = val[1] - val[0] + 1;
5419 adap->vres.rq.start = val[2];
5420 adap->vres.rq.size = val[3] - val[2] + 1;
5421 adap->vres.pbl.start = val[4];
5422 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
5423
5424 params[0] = FW_PARAM_PFVF(SQRQ_START);
5425 params[1] = FW_PARAM_PFVF(SQRQ_END);
5426 params[2] = FW_PARAM_PFVF(CQ_START);
5427 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
5428 params[4] = FW_PARAM_PFVF(OCQ_START);
5429 params[5] = FW_PARAM_PFVF(OCQ_END);
636f9d37 5430 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
a0881cab
DM
5431 if (ret < 0)
5432 goto bye;
5433 adap->vres.qp.start = val[0];
5434 adap->vres.qp.size = val[1] - val[0] + 1;
5435 adap->vres.cq.start = val[2];
5436 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
5437 adap->vres.ocq.start = val[4];
5438 adap->vres.ocq.size = val[5] - val[4] + 1;
b8ff05a9 5439 }
636f9d37 5440 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
5441 params[0] = FW_PARAM_PFVF(ISCSI_START);
5442 params[1] = FW_PARAM_PFVF(ISCSI_END);
636f9d37
VP
5443 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5444 params, val);
b8ff05a9
DM
5445 if (ret < 0)
5446 goto bye;
5447 adap->vres.iscsi.start = val[0];
5448 adap->vres.iscsi.size = val[1] - val[0] + 1;
5449 }
5450#undef FW_PARAM_PFVF
5451#undef FW_PARAM_DEV
5452
636f9d37
VP
5453 /*
5454 * These are finalized by FW initialization, load their values now.
5455 */
b8ff05a9
DM
5456 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5457 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
636f9d37 5458 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
b8ff05a9
DM
5459 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5460 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5461 adap->params.b_wnd);
7ee9ff94 5462
636f9d37
VP
5463 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5464 for (j = 0; j < NCHAN; j++)
5465 adap->params.tp.tx_modq[j] = j;
7ee9ff94 5466
793dad94
VP
5467 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5468 &adap->filter_mode, 1,
5469 TP_VLAN_PRI_MAP);
5470
636f9d37 5471 adap->flags |= FW_OK;
b8ff05a9
DM
5472 return 0;
5473
5474 /*
636f9d37
VP
5475 * Something bad happened. If a command timed out or failed with EIO
5476 * FW does not operate within its spec or something catastrophic
5477 * happened to HW/FW, stop issuing commands.
b8ff05a9 5478 */
636f9d37
VP
5479bye:
5480 if (ret != -ETIMEDOUT && ret != -EIO)
5481 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
5482 return ret;
5483}
5484
204dc3c0
DM
5485/* EEH callbacks */
5486
5487static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5488 pci_channel_state_t state)
5489{
5490 int i;
5491 struct adapter *adap = pci_get_drvdata(pdev);
5492
5493 if (!adap)
5494 goto out;
5495
5496 rtnl_lock();
5497 adap->flags &= ~FW_OK;
5498 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5499 for_each_port(adap, i) {
5500 struct net_device *dev = adap->port[i];
5501
5502 netif_device_detach(dev);
5503 netif_carrier_off(dev);
5504 }
5505 if (adap->flags & FULL_INIT_DONE)
5506 cxgb_down(adap);
5507 rtnl_unlock();
5508 pci_disable_device(pdev);
5509out: return state == pci_channel_io_perm_failure ?
5510 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5511}
5512
5513static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5514{
5515 int i, ret;
5516 struct fw_caps_config_cmd c;
5517 struct adapter *adap = pci_get_drvdata(pdev);
5518
5519 if (!adap) {
5520 pci_restore_state(pdev);
5521 pci_save_state(pdev);
5522 return PCI_ERS_RESULT_RECOVERED;
5523 }
5524
5525 if (pci_enable_device(pdev)) {
5526 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
5527 return PCI_ERS_RESULT_DISCONNECT;
5528 }
5529
5530 pci_set_master(pdev);
5531 pci_restore_state(pdev);
5532 pci_save_state(pdev);
5533 pci_cleanup_aer_uncorrect_error_status(pdev);
5534
5535 if (t4_wait_dev_ready(adap) < 0)
5536 return PCI_ERS_RESULT_DISCONNECT;
777c2300 5537 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
204dc3c0
DM
5538 return PCI_ERS_RESULT_DISCONNECT;
5539 adap->flags |= FW_OK;
5540 if (adap_init1(adap, &c))
5541 return PCI_ERS_RESULT_DISCONNECT;
5542
5543 for_each_port(adap, i) {
5544 struct port_info *p = adap2pinfo(adap, i);
5545
060e0c75
DM
5546 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5547 NULL, NULL);
204dc3c0
DM
5548 if (ret < 0)
5549 return PCI_ERS_RESULT_DISCONNECT;
5550 p->viid = ret;
5551 p->xact_addr_filt = -1;
5552 }
5553
5554 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5555 adap->params.b_wnd);
1ae970e0 5556 setup_memwin(adap);
204dc3c0
DM
5557 if (cxgb_up(adap))
5558 return PCI_ERS_RESULT_DISCONNECT;
5559 return PCI_ERS_RESULT_RECOVERED;
5560}
5561
5562static void eeh_resume(struct pci_dev *pdev)
5563{
5564 int i;
5565 struct adapter *adap = pci_get_drvdata(pdev);
5566
5567 if (!adap)
5568 return;
5569
5570 rtnl_lock();
5571 for_each_port(adap, i) {
5572 struct net_device *dev = adap->port[i];
5573
5574 if (netif_running(dev)) {
5575 link_start(dev);
5576 cxgb_set_rxmode(dev);
5577 }
5578 netif_device_attach(dev);
5579 }
5580 rtnl_unlock();
5581}
5582
3646f0e5 5583static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
5584 .error_detected = eeh_err_detected,
5585 .slot_reset = eeh_slot_reset,
5586 .resume = eeh_resume,
5587};
5588
b8ff05a9
DM
5589static inline bool is_10g_port(const struct link_config *lc)
5590{
5591 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
5592}
5593
5594static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
5595 unsigned int size, unsigned int iqe_size)
5596{
5597 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
5598 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
5599 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5600 q->iqe_len = iqe_size;
5601 q->size = size;
5602}
5603
5604/*
5605 * Perform default configuration of DMA queues depending on the number and type
5606 * of ports we found and the number of available CPUs. Most settings can be
5607 * modified by the admin prior to actual use.
5608 */
91744948 5609static void cfg_queues(struct adapter *adap)
b8ff05a9
DM
5610{
5611 struct sge *s = &adap->sge;
5612 int i, q10g = 0, n10g = 0, qidx = 0;
5613
5614 for_each_port(adap, i)
5615 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
5616
5617 /*
5618 * We default to 1 queue per non-10G port and up to # of cores queues
5619 * per 10G port.
5620 */
5621 if (n10g)
5622 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5952dde7
YM
5623 if (q10g > netif_get_num_default_rss_queues())
5624 q10g = netif_get_num_default_rss_queues();
b8ff05a9
DM
5625
5626 for_each_port(adap, i) {
5627 struct port_info *pi = adap2pinfo(adap, i);
5628
5629 pi->first_qset = qidx;
5630 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
5631 qidx += pi->nqsets;
5632 }
5633
5634 s->ethqsets = qidx;
5635 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5636
5637 if (is_offload(adap)) {
5638 /*
5639 * For offload we use 1 queue/channel if all ports are up to 1G,
5640 * otherwise we divide all available queues amongst the channels
5641 * capped by the number of available cores.
5642 */
5643 if (n10g) {
5644 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5645 num_online_cpus());
5646 s->ofldqsets = roundup(i, adap->params.nports);
5647 } else
5648 s->ofldqsets = adap->params.nports;
5649 /* For RDMA one Rx queue per channel suffices */
5650 s->rdmaqs = adap->params.nports;
5651 }
5652
5653 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5654 struct sge_eth_rxq *r = &s->ethrxq[i];
5655
5656 init_rspq(&r->rspq, 0, 0, 1024, 64);
5657 r->fl.size = 72;
5658 }
5659
5660 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5661 s->ethtxq[i].q.size = 1024;
5662
5663 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5664 s->ctrlq[i].q.size = 512;
5665
5666 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5667 s->ofldtxq[i].q.size = 1024;
5668
5669 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5670 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5671
5672 init_rspq(&r->rspq, 0, 0, 1024, 64);
5673 r->rspq.uld = CXGB4_ULD_ISCSI;
5674 r->fl.size = 72;
5675 }
5676
5677 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5678 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5679
5680 init_rspq(&r->rspq, 0, 0, 511, 64);
5681 r->rspq.uld = CXGB4_ULD_RDMA;
5682 r->fl.size = 72;
5683 }
5684
5685 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
5686 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
5687}
5688
5689/*
5690 * Reduce the number of Ethernet queues across all ports to at most n.
5691 * n provides at least one queue per port.
5692 */
91744948 5693static void reduce_ethqs(struct adapter *adap, int n)
b8ff05a9
DM
5694{
5695 int i;
5696 struct port_info *pi;
5697
5698 while (n < adap->sge.ethqsets)
5699 for_each_port(adap, i) {
5700 pi = adap2pinfo(adap, i);
5701 if (pi->nqsets > 1) {
5702 pi->nqsets--;
5703 adap->sge.ethqsets--;
5704 if (adap->sge.ethqsets <= n)
5705 break;
5706 }
5707 }
5708
5709 n = 0;
5710 for_each_port(adap, i) {
5711 pi = adap2pinfo(adap, i);
5712 pi->first_qset = n;
5713 n += pi->nqsets;
5714 }
5715}
5716
5717/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5718#define EXTRA_VECS 2
5719
91744948 5720static int enable_msix(struct adapter *adap)
b8ff05a9
DM
5721{
5722 int ofld_need = 0;
5723 int i, err, want, need;
5724 struct sge *s = &adap->sge;
5725 unsigned int nchan = adap->params.nports;
5726 struct msix_entry entries[MAX_INGQ + 1];
5727
5728 for (i = 0; i < ARRAY_SIZE(entries); ++i)
5729 entries[i].entry = i;
5730
5731 want = s->max_ethqsets + EXTRA_VECS;
5732 if (is_offload(adap)) {
5733 want += s->rdmaqs + s->ofldqsets;
5734 /* need nchan for each possible ULD */
5735 ofld_need = 2 * nchan;
5736 }
5737 need = adap->params.nports + EXTRA_VECS + ofld_need;
5738
5739 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
5740 want = err;
5741
5742 if (!err) {
5743 /*
5744 * Distribute available vectors to the various queue groups.
5745 * Every group gets its minimum requirement and NIC gets top
5746 * priority for leftovers.
5747 */
5748 i = want - EXTRA_VECS - ofld_need;
5749 if (i < s->max_ethqsets) {
5750 s->max_ethqsets = i;
5751 if (i < s->ethqsets)
5752 reduce_ethqs(adap, i);
5753 }
5754 if (is_offload(adap)) {
5755 i = want - EXTRA_VECS - s->max_ethqsets;
5756 i -= ofld_need - nchan;
5757 s->ofldqsets = (i / nchan) * nchan; /* round down */
5758 }
5759 for (i = 0; i < want; ++i)
5760 adap->msix_info[i].vec = entries[i].vector;
5761 } else if (err > 0)
5762 dev_info(adap->pdev_dev,
5763 "only %d MSI-X vectors left, not using MSI-X\n", err);
5764 return err;
5765}
5766
5767#undef EXTRA_VECS
5768
91744948 5769static int init_rss(struct adapter *adap)
671b0060
DM
5770{
5771 unsigned int i, j;
5772
5773 for_each_port(adap, i) {
5774 struct port_info *pi = adap2pinfo(adap, i);
5775
5776 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5777 if (!pi->rss)
5778 return -ENOMEM;
5779 for (j = 0; j < pi->rss_size; j++)
278bc429 5780 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
671b0060
DM
5781 }
5782 return 0;
5783}
5784
91744948 5785static void print_port_info(const struct net_device *dev)
b8ff05a9
DM
5786{
5787 static const char *base[] = {
a0881cab 5788 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
7d5e77aa 5789 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
b8ff05a9
DM
5790 };
5791
b8ff05a9 5792 char buf[80];
118969ed 5793 char *bufp = buf;
f1a051b9 5794 const char *spd = "";
118969ed
DM
5795 const struct port_info *pi = netdev_priv(dev);
5796 const struct adapter *adap = pi->adapter;
f1a051b9
DM
5797
5798 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5799 spd = " 2.5 GT/s";
5800 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5801 spd = " 5 GT/s";
b8ff05a9 5802
118969ed
DM
5803 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5804 bufp += sprintf(bufp, "100/");
5805 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5806 bufp += sprintf(bufp, "1000/");
5807 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5808 bufp += sprintf(bufp, "10G/");
5809 if (bufp != buf)
5810 --bufp;
5811 sprintf(bufp, "BASE-%s", base[pi->port_type]);
5812
5813 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
0a57a536 5814 adap->params.vpd.id,
d14807dd 5815 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
118969ed
DM
5816 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5817 (adap->flags & USING_MSIX) ? " MSI-X" :
5818 (adap->flags & USING_MSI) ? " MSI" : "");
5819 netdev_info(dev, "S/N: %s, E/C: %s\n",
5820 adap->params.vpd.sn, adap->params.vpd.ec);
b8ff05a9
DM
5821}
5822
91744948 5823static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
ef306b50 5824{
e5c8ae5f 5825 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
ef306b50
DM
5826}
5827
06546391
DM
5828/*
5829 * Free the following resources:
5830 * - memory used for tables
5831 * - MSI/MSI-X
5832 * - net devices
5833 * - resources FW is holding for us
5834 */
5835static void free_some_resources(struct adapter *adapter)
5836{
5837 unsigned int i;
5838
5839 t4_free_mem(adapter->l2t);
5840 t4_free_mem(adapter->tids.tid_tab);
5841 disable_msi(adapter);
5842
5843 for_each_port(adapter, i)
671b0060
DM
5844 if (adapter->port[i]) {
5845 kfree(adap2pinfo(adapter, i)->rss);
06546391 5846 free_netdev(adapter->port[i]);
671b0060 5847 }
06546391 5848 if (adapter->flags & FW_OK)
060e0c75 5849 t4_fw_bye(adapter, adapter->fn);
06546391
DM
5850}
5851
2ed28baa 5852#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 5853#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9 5854 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
22adfe0a 5855#define SEGMENT_SIZE 128
b8ff05a9 5856
1dd06ae8 5857static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
b8ff05a9 5858{
22adfe0a 5859 int func, i, err, s_qpp, qpp, num_seg;
b8ff05a9 5860 struct port_info *pi;
c8f44aff 5861 bool highdma = false;
b8ff05a9
DM
5862 struct adapter *adapter = NULL;
5863
5864 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5865
5866 err = pci_request_regions(pdev, KBUILD_MODNAME);
5867 if (err) {
5868 /* Just info, some other driver may have claimed the device. */
5869 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5870 return err;
5871 }
5872
060e0c75 5873 /* We control everything through one PF */
b8ff05a9 5874 func = PCI_FUNC(pdev->devfn);
060e0c75 5875 if (func != ent->driver_data) {
204dc3c0 5876 pci_save_state(pdev); /* to restore SR-IOV later */
b8ff05a9 5877 goto sriov;
204dc3c0 5878 }
b8ff05a9
DM
5879
5880 err = pci_enable_device(pdev);
5881 if (err) {
5882 dev_err(&pdev->dev, "cannot enable PCI device\n");
5883 goto out_release_regions;
5884 }
5885
5886 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 5887 highdma = true;
b8ff05a9
DM
5888 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5889 if (err) {
5890 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5891 "coherent allocations\n");
5892 goto out_disable_device;
5893 }
5894 } else {
5895 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5896 if (err) {
5897 dev_err(&pdev->dev, "no usable DMA configuration\n");
5898 goto out_disable_device;
5899 }
5900 }
5901
5902 pci_enable_pcie_error_reporting(pdev);
ef306b50 5903 enable_pcie_relaxed_ordering(pdev);
b8ff05a9
DM
5904 pci_set_master(pdev);
5905 pci_save_state(pdev);
5906
5907 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5908 if (!adapter) {
5909 err = -ENOMEM;
5910 goto out_disable_device;
5911 }
5912
5913 adapter->regs = pci_ioremap_bar(pdev, 0);
5914 if (!adapter->regs) {
5915 dev_err(&pdev->dev, "cannot map device registers\n");
5916 err = -ENOMEM;
5917 goto out_free_adapter;
5918 }
5919
5920 adapter->pdev = pdev;
5921 adapter->pdev_dev = &pdev->dev;
3069ee9b 5922 adapter->mbox = func;
060e0c75 5923 adapter->fn = func;
b8ff05a9
DM
5924 adapter->msg_enable = dflt_msg_enable;
5925 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5926
5927 spin_lock_init(&adapter->stats_lock);
5928 spin_lock_init(&adapter->tid_release_lock);
5929
5930 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
5931 INIT_WORK(&adapter->db_full_task, process_db_full);
5932 INIT_WORK(&adapter->db_drop_task, process_db_drop);
b8ff05a9
DM
5933
5934 err = t4_prep_adapter(adapter);
5935 if (err)
22adfe0a
SR
5936 goto out_unmap_bar0;
5937
d14807dd 5938 if (!is_t4(adapter->params.chip)) {
22adfe0a
SR
5939 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5940 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5941 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
5942 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5943
5944 /* Each segment size is 128B. Write coalescing is enabled only
5945 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
5946 * queue is less no of segments that can be accommodated in
5947 * a page size.
5948 */
5949 if (qpp > num_seg) {
5950 dev_err(&pdev->dev,
5951 "Incorrect number of egress queues per page\n");
5952 err = -EINVAL;
5953 goto out_unmap_bar0;
5954 }
5955 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5956 pci_resource_len(pdev, 2));
5957 if (!adapter->bar2) {
5958 dev_err(&pdev->dev, "cannot map device bar2 region\n");
5959 err = -ENOMEM;
5960 goto out_unmap_bar0;
5961 }
5962 }
5963
636f9d37 5964 setup_memwin(adapter);
b8ff05a9 5965 err = adap_init0(adapter);
636f9d37 5966 setup_memwin_rdma(adapter);
b8ff05a9
DM
5967 if (err)
5968 goto out_unmap_bar;
5969
5970 for_each_port(adapter, i) {
5971 struct net_device *netdev;
5972
5973 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5974 MAX_ETH_QSETS);
5975 if (!netdev) {
5976 err = -ENOMEM;
5977 goto out_free_dev;
5978 }
5979
5980 SET_NETDEV_DEV(netdev, &pdev->dev);
5981
5982 adapter->port[i] = netdev;
5983 pi = netdev_priv(netdev);
5984 pi->adapter = adapter;
5985 pi->xact_addr_filt = -1;
b8ff05a9 5986 pi->port_id = i;
b8ff05a9
DM
5987 netdev->irq = pdev->irq;
5988
2ed28baa
MM
5989 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5990 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5991 NETIF_F_RXCSUM | NETIF_F_RXHASH |
f646968f 5992 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
c8f44aff
MM
5993 if (highdma)
5994 netdev->hw_features |= NETIF_F_HIGHDMA;
5995 netdev->features |= netdev->hw_features;
b8ff05a9
DM
5996 netdev->vlan_features = netdev->features & VLAN_FEAT;
5997
01789349
JP
5998 netdev->priv_flags |= IFF_UNICAST_FLT;
5999
b8ff05a9
DM
6000 netdev->netdev_ops = &cxgb4_netdev_ops;
6001 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
6002 }
6003
6004 pci_set_drvdata(pdev, adapter);
6005
6006 if (adapter->flags & FW_OK) {
060e0c75 6007 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
6008 if (err)
6009 goto out_free_dev;
6010 }
6011
6012 /*
6013 * Configure queues and allocate tables now, they can be needed as
6014 * soon as the first register_netdev completes.
6015 */
6016 cfg_queues(adapter);
6017
6018 adapter->l2t = t4_init_l2t();
6019 if (!adapter->l2t) {
6020 /* We tolerate a lack of L2T, giving up some functionality */
6021 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6022 adapter->params.offload = 0;
6023 }
6024
6025 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6026 dev_warn(&pdev->dev, "could not allocate TID table, "
6027 "continuing\n");
6028 adapter->params.offload = 0;
6029 }
6030
f7cabcdd
DM
6031 /* See what interrupts we'll be using */
6032 if (msi > 1 && enable_msix(adapter) == 0)
6033 adapter->flags |= USING_MSIX;
6034 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6035 adapter->flags |= USING_MSI;
6036
671b0060
DM
6037 err = init_rss(adapter);
6038 if (err)
6039 goto out_free_dev;
6040
b8ff05a9
DM
6041 /*
6042 * The card is now ready to go. If any errors occur during device
6043 * registration we do not fail the whole card but rather proceed only
6044 * with the ports we manage to register successfully. However we must
6045 * register at least one net device.
6046 */
6047 for_each_port(adapter, i) {
a57cabe0
DM
6048 pi = adap2pinfo(adapter, i);
6049 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6050 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6051
b8ff05a9
DM
6052 err = register_netdev(adapter->port[i]);
6053 if (err)
b1a3c2b6 6054 break;
b1a3c2b6
DM
6055 adapter->chan_map[pi->tx_chan] = i;
6056 print_port_info(adapter->port[i]);
b8ff05a9 6057 }
b1a3c2b6 6058 if (i == 0) {
b8ff05a9
DM
6059 dev_err(&pdev->dev, "could not register any net devices\n");
6060 goto out_free_dev;
6061 }
b1a3c2b6
DM
6062 if (err) {
6063 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6064 err = 0;
6403eab1 6065 }
b8ff05a9
DM
6066
6067 if (cxgb4_debugfs_root) {
6068 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6069 cxgb4_debugfs_root);
6070 setup_debugfs(adapter);
6071 }
6072
6482aa7c
DLR
6073 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6074 pdev->needs_freset = 1;
6075
b8ff05a9
DM
6076 if (is_offload(adapter))
6077 attach_ulds(adapter);
6078
b8ff05a9
DM
6079sriov:
6080#ifdef CONFIG_PCI_IOV
7d6727cf 6081 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
b8ff05a9
DM
6082 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6083 dev_info(&pdev->dev,
6084 "instantiated %u virtual functions\n",
6085 num_vf[func]);
6086#endif
6087 return 0;
6088
6089 out_free_dev:
06546391 6090 free_some_resources(adapter);
b8ff05a9 6091 out_unmap_bar:
d14807dd 6092 if (!is_t4(adapter->params.chip))
22adfe0a
SR
6093 iounmap(adapter->bar2);
6094 out_unmap_bar0:
b8ff05a9
DM
6095 iounmap(adapter->regs);
6096 out_free_adapter:
6097 kfree(adapter);
6098 out_disable_device:
6099 pci_disable_pcie_error_reporting(pdev);
6100 pci_disable_device(pdev);
6101 out_release_regions:
6102 pci_release_regions(pdev);
b8ff05a9
DM
6103 return err;
6104}
6105
91744948 6106static void remove_one(struct pci_dev *pdev)
b8ff05a9
DM
6107{
6108 struct adapter *adapter = pci_get_drvdata(pdev);
6109
636f9d37 6110#ifdef CONFIG_PCI_IOV
b8ff05a9
DM
6111 pci_disable_sriov(pdev);
6112
636f9d37
VP
6113#endif
6114
b8ff05a9
DM
6115 if (adapter) {
6116 int i;
6117
6118 if (is_offload(adapter))
6119 detach_ulds(adapter);
6120
6121 for_each_port(adapter, i)
8f3a7676 6122 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
b8ff05a9
DM
6123 unregister_netdev(adapter->port[i]);
6124
6125 if (adapter->debugfs_root)
6126 debugfs_remove_recursive(adapter->debugfs_root);
6127
f2b7e78d
VP
6128 /* If we allocated filters, free up state associated with any
6129 * valid filters ...
6130 */
6131 if (adapter->tids.ftid_tab) {
6132 struct filter_entry *f = &adapter->tids.ftid_tab[0];
dca4faeb
VP
6133 for (i = 0; i < (adapter->tids.nftids +
6134 adapter->tids.nsftids); i++, f++)
f2b7e78d
VP
6135 if (f->valid)
6136 clear_filter(adapter, f);
6137 }
6138
aaefae9b
DM
6139 if (adapter->flags & FULL_INIT_DONE)
6140 cxgb_down(adapter);
b8ff05a9 6141
06546391 6142 free_some_resources(adapter);
b8ff05a9 6143 iounmap(adapter->regs);
d14807dd 6144 if (!is_t4(adapter->params.chip))
22adfe0a 6145 iounmap(adapter->bar2);
b8ff05a9
DM
6146 kfree(adapter);
6147 pci_disable_pcie_error_reporting(pdev);
6148 pci_disable_device(pdev);
6149 pci_release_regions(pdev);
a069ec91 6150 } else
b8ff05a9
DM
6151 pci_release_regions(pdev);
6152}
6153
6154static struct pci_driver cxgb4_driver = {
6155 .name = KBUILD_MODNAME,
6156 .id_table = cxgb4_pci_tbl,
6157 .probe = init_one,
91744948 6158 .remove = remove_one,
204dc3c0 6159 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
6160};
6161
6162static int __init cxgb4_init_module(void)
6163{
6164 int ret;
6165
3069ee9b
VP
6166 workq = create_singlethread_workqueue("cxgb4");
6167 if (!workq)
6168 return -ENOMEM;
6169
b8ff05a9
DM
6170 /* Debugfs support is optional, just warn if this fails */
6171 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6172 if (!cxgb4_debugfs_root)
428ac43f 6173 pr_warn("could not create debugfs entry, continuing\n");
b8ff05a9
DM
6174
6175 ret = pci_register_driver(&cxgb4_driver);
73a695f8 6176 if (ret < 0) {
b8ff05a9 6177 debugfs_remove(cxgb4_debugfs_root);
73a695f8
WY
6178 destroy_workqueue(workq);
6179 }
01bcca68
VP
6180
6181 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6182
b8ff05a9
DM
6183 return ret;
6184}
6185
6186static void __exit cxgb4_cleanup_module(void)
6187{
01bcca68 6188 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
b8ff05a9
DM
6189 pci_unregister_driver(&cxgb4_driver);
6190 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3069ee9b
VP
6191 flush_workqueue(workq);
6192 destroy_workqueue(workq);
b8ff05a9
DM
6193}
6194
6195module_init(cxgb4_init_module);
6196module_exit(cxgb4_cleanup_module);