drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
 44#include <linux/if.h>
45#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
63#include <asm/uaccess.h>
64
65#include "cxgb4.h"
66#include "t4_regs.h"
67#include "t4_msg.h"
68#include "t4fw_api.h"
69#include "l2t.h"
70
71#define DRV_VERSION "2.0.0-ko"
72#define DRV_DESC "Chelsio T4/T5 Network Driver"
73
74/*
75 * Max interrupt hold-off timer value in us. Queues fall back to this value
76 * under extreme memory pressure so it's largish to give the system time to
77 * recover.
78 */
79#define MAX_SGE_TIMERVAL 200U
80
 81enum {
82 /*
83 * Physical Function provisioning constants.
84 */
85 PFRES_NVI = 4, /* # of Virtual Interfaces */
86 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
87 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
88 */
89 PFRES_NEQ = 256, /* # of egress queues */
90 PFRES_NIQ = 0, /* # of ingress queues */
91 PFRES_TC = 0, /* PCI-E traffic class */
92 PFRES_NEXACTF = 128, /* # of exact MPS filters */
93
94 PFRES_R_CAPS = FW_CMD_CAP_PF,
95 PFRES_WX_CAPS = FW_CMD_CAP_PF,
96
97#ifdef CONFIG_PCI_IOV
98 /*
99 * Virtual Function provisioning constants. We need two extra Ingress
100 * Queues with Interrupt capability to serve as the VF's Firmware
101 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
102 * neither will have Free Lists associated with them). For each
103 * Ethernet/Control Egress Queue and for each Free List, we need an
104 * Egress Context.
105 */
106 VFRES_NPORTS = 1, /* # of "ports" per VF */
107 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
108
109 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
110 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
111 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
 112 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
 113 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
114 VFRES_TC = 0, /* PCI-E traffic class */
115 VFRES_NEXACTF = 16, /* # of exact MPS filters */
116
117 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
118 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
 119#endif
120};
121
122/*
123 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
124 * static and likely not to be useful in the long run. We really need to
125 * implement some form of persistent configuration which the firmware
126 * controls.
127 */
128static unsigned int pfvfres_pmask(struct adapter *adapter,
129 unsigned int pf, unsigned int vf)
130{
131 unsigned int portn, portvec;
132
133 /*
134 * Give PF's access to all of the ports.
135 */
136 if (vf == 0)
137 return FW_PFVF_CMD_PMASK_MASK;
138
139 /*
140 * For VFs, we'll assign them access to the ports based purely on the
141 * PF. We assign active ports in order, wrapping around if there are
142 * fewer active ports than PFs: e.g. active port[pf % nports].
143 * Unfortunately the adapter's port_info structs haven't been
144 * initialized yet so we have to compute this.
145 */
146 if (adapter->params.nports == 0)
147 return 0;
148
149 portn = pf % adapter->params.nports;
150 portvec = adapter->params.portvec;
151 for (;;) {
152 /*
153 * Isolate the lowest set bit in the port vector. If we're at
154 * the port number that we want, return that as the pmask.
155 * otherwise mask that bit out of the port vector and
156 * decrement our port number ...
157 */
158 unsigned int pmask = portvec ^ (portvec & (portvec-1));
159 if (portn == 0)
160 return pmask;
161 portn--;
162 portvec &= ~pmask;
163 }
164 /*NOTREACHED*/
165}
 166
167enum {
168 MAX_TXQ_ENTRIES = 16384,
169 MAX_CTRL_TXQ_ENTRIES = 1024,
170 MAX_RSPQ_ENTRIES = 16384,
171 MAX_RX_BUFFERS = 16384,
172 MIN_TXQ_ENTRIES = 32,
173 MIN_CTRL_TXQ_ENTRIES = 32,
174 MIN_RSPQ_ENTRIES = 128,
175 MIN_FL_ENTRIES = 16
176};
177
178/* Host shadow copy of ingress filter entry. This is in host native format
 179 * and doesn't match the ordering or bit order, etc. of the hardware or the
180 * firmware command. The use of bit-field structure elements is purely to
181 * remind ourselves of the field size limitations and save memory in the case
182 * where the filter table is large.
183 */
184struct filter_entry {
185 /* Administrative fields for filter.
186 */
187 u32 valid:1; /* filter allocated and valid */
188 u32 locked:1; /* filter is administratively locked */
189
190 u32 pending:1; /* filter action is pending firmware reply */
191 u32 smtidx:8; /* Source MAC Table index for smac */
192 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
193
194 /* The filter itself. Most of this is a straight copy of information
195 * provided by the extended ioctl(). Some fields are translated to
196 * internal forms -- for instance the Ingress Queue ID passed in from
197 * the ioctl() is translated into the Absolute Ingress Queue ID.
198 */
199 struct ch_filter_specification fs;
200};
201
202#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
203 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
204 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
205
 206#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
207
208static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 209 CH_DEVICE(0xa000, 0), /* PE10K */
210 CH_DEVICE(0x4001, -1),
211 CH_DEVICE(0x4002, -1),
212 CH_DEVICE(0x4003, -1),
213 CH_DEVICE(0x4004, -1),
214 CH_DEVICE(0x4005, -1),
215 CH_DEVICE(0x4006, -1),
216 CH_DEVICE(0x4007, -1),
217 CH_DEVICE(0x4008, -1),
218 CH_DEVICE(0x4009, -1),
219 CH_DEVICE(0x400a, -1),
220 CH_DEVICE(0x4401, 4),
221 CH_DEVICE(0x4402, 4),
222 CH_DEVICE(0x4403, 4),
223 CH_DEVICE(0x4404, 4),
224 CH_DEVICE(0x4405, 4),
225 CH_DEVICE(0x4406, 4),
226 CH_DEVICE(0x4407, 4),
227 CH_DEVICE(0x4408, 4),
228 CH_DEVICE(0x4409, 4),
229 CH_DEVICE(0x440a, 4),
230 CH_DEVICE(0x440d, 4),
231 CH_DEVICE(0x440e, 4),
232 CH_DEVICE(0x5001, 4),
233 CH_DEVICE(0x5002, 4),
234 CH_DEVICE(0x5003, 4),
235 CH_DEVICE(0x5004, 4),
236 CH_DEVICE(0x5005, 4),
237 CH_DEVICE(0x5006, 4),
238 CH_DEVICE(0x5007, 4),
239 CH_DEVICE(0x5008, 4),
240 CH_DEVICE(0x5009, 4),
241 CH_DEVICE(0x500A, 4),
242 CH_DEVICE(0x500B, 4),
243 CH_DEVICE(0x500C, 4),
244 CH_DEVICE(0x500D, 4),
245 CH_DEVICE(0x500E, 4),
246 CH_DEVICE(0x500F, 4),
247 CH_DEVICE(0x5010, 4),
248 CH_DEVICE(0x5011, 4),
249 CH_DEVICE(0x5012, 4),
250 CH_DEVICE(0x5013, 4),
251 CH_DEVICE(0x5401, 4),
252 CH_DEVICE(0x5402, 4),
253 CH_DEVICE(0x5403, 4),
254 CH_DEVICE(0x5404, 4),
255 CH_DEVICE(0x5405, 4),
256 CH_DEVICE(0x5406, 4),
257 CH_DEVICE(0x5407, 4),
258 CH_DEVICE(0x5408, 4),
259 CH_DEVICE(0x5409, 4),
260 CH_DEVICE(0x540A, 4),
261 CH_DEVICE(0x540B, 4),
262 CH_DEVICE(0x540C, 4),
263 CH_DEVICE(0x540D, 4),
264 CH_DEVICE(0x540E, 4),
265 CH_DEVICE(0x540F, 4),
266 CH_DEVICE(0x5410, 4),
267 CH_DEVICE(0x5411, 4),
268 CH_DEVICE(0x5412, 4),
269 CH_DEVICE(0x5413, 4),
270 { 0, }
271};
272
273#define FW_FNAME "cxgb4/t4fw.bin"
 274#define FW5_FNAME "cxgb4/t5fw.bin"
 275#define FW_CFNAME "cxgb4/t4-config.txt"
 276#define FW5_CFNAME "cxgb4/t5-config.txt"
277
278MODULE_DESCRIPTION(DRV_DESC);
279MODULE_AUTHOR("Chelsio Communications");
280MODULE_LICENSE("Dual BSD/GPL");
281MODULE_VERSION(DRV_VERSION);
282MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
283MODULE_FIRMWARE(FW_FNAME);
 284MODULE_FIRMWARE(FW5_FNAME);
 285
286/*
287 * Normally we're willing to become the firmware's Master PF but will be happy
288 * if another PF has already become the Master and initialized the adapter.
289 * Setting "force_init" will cause this driver to forcibly establish itself as
290 * the Master PF and initialize the adapter.
291 */
292static uint force_init;
293
294module_param(force_init, uint, 0644);
295MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
296
297/*
298 * Normally if the firmware we connect to has Configuration File support, we
299 * use that and only fall back to the old Driver-based initialization if the
300 * Configuration File fails for some reason. If force_old_init is set, then
301 * we'll always use the old Driver-based initialization sequence.
302 */
303static uint force_old_init;
304
305module_param(force_old_init, uint, 0644);
306MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
307
308static int dflt_msg_enable = DFLT_MSG_ENABLE;
309
310module_param(dflt_msg_enable, int, 0644);
311MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
312
313/*
314 * The driver uses the best interrupt scheme available on a platform in the
315 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
316 * of these schemes the driver may consider as follows:
317 *
318 * msi = 2: choose from among all three options
319 * msi = 1: only consider MSI and INTx interrupts
320 * msi = 0: force INTx interrupts
321 */
322static int msi = 2;
323
324module_param(msi, int, 0644);
325MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
326
327/*
328 * Queue interrupt hold-off timer values. Queues default to the first of these
329 * upon creation.
330 */
331static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
332
333module_param_array(intr_holdoff, uint, NULL, 0644);
334MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
335 "0..4 in microseconds");
336
337static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
338
339module_param_array(intr_cnt, uint, NULL, 0644);
340MODULE_PARM_DESC(intr_cnt,
341 "thresholds 1..3 for queue interrupt packet counters");
342
343/*
344 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
345 * offset by 2 bytes in order to have the IP headers line up on 4-byte
346 * boundaries. This is a requirement for many architectures which will throw
347 * a machine check fault if an attempt is made to access one of the 4-byte IP
348 * header fields on a non-4-byte boundary. And it's a major performance issue
349 * even on some architectures which allow it like some implementations of the
350 * x86 ISA. However, some architectures don't mind this and for some very
351 * edge-case performance sensitive applications (like forwarding large volumes
352 * of small packets), setting this DMA offset to 0 will decrease the number of
353 * PCI-E Bus transfers enough to measurably affect performance.
354 */
355static int rx_dma_offset = 2;
356
 357static bool vf_acls;
358
359#ifdef CONFIG_PCI_IOV
360module_param(vf_acls, bool, 0644);
361MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
362
363/* Configure the number of PCI-E Virtual Function which are to be instantiated
364 * on SR-IOV Capable Physical Functions.
 365 */
 366static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
367
368module_param_array(num_vf, uint, NULL, 0644);
 369MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
370#endif
371
372/*
373 * The filter TCAM has a fixed portion and a variable portion. The fixed
374 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
375 * ports. The variable portion is 36 bits which can include things like Exact
376 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
377 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
378 * far exceed the 36-bit budget for this "compressed" header portion of the
379 * filter. Thus, we have a scarce resource which must be carefully managed.
380 *
381 * By default we set this up to mostly match the set of filter matching
382 * capabilities of T3 but with accommodations for some of T4's more
383 * interesting features:
384 *
385 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
386 * [Inner] VLAN (17), Port (3), FCoE (1) }
387 */
388enum {
389 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
390 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
391 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
392};
393
394static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
395
396module_param(tp_vlan_pri_map, uint, 0644);
397MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
398
399static struct dentry *cxgb4_debugfs_root;
400
401static LIST_HEAD(adapter_list);
402static DEFINE_MUTEX(uld_mutex);
403static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
404static const char *uld_str[] = { "RDMA", "iSCSI" };
405
406static void link_report(struct net_device *dev)
407{
408 if (!netif_carrier_ok(dev))
409 netdev_info(dev, "link down\n");
410 else {
411 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
412
413 const char *s = "10Mbps";
414 const struct port_info *p = netdev_priv(dev);
415
416 switch (p->link_cfg.speed) {
417 case SPEED_10000:
418 s = "10Gbps";
419 break;
420 case SPEED_1000:
421 s = "1000Mbps";
422 break;
423 case SPEED_100:
424 s = "100Mbps";
425 break;
426 }
427
428 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
429 fc[p->link_cfg.fc]);
430 }
431}
432
433void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
434{
435 struct net_device *dev = adapter->port[port_id];
436
437 /* Skip changes from disabled ports. */
438 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
439 if (link_stat)
440 netif_carrier_on(dev);
441 else
442 netif_carrier_off(dev);
443
444 link_report(dev);
445 }
446}
447
448void t4_os_portmod_changed(const struct adapter *adap, int port_id)
449{
450 static const char *mod_str[] = {
 451 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
452 };
453
454 const struct net_device *dev = adap->port[port_id];
455 const struct port_info *pi = netdev_priv(dev);
456
457 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
458 netdev_info(dev, "port module unplugged\n");
 459 else if (pi->mod_type < ARRAY_SIZE(mod_str))
460 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
461}
462
463/*
464 * Configure the exact and hash address filters to handle a port's multicast
465 * and secondary unicast MAC addresses.
466 */
467static int set_addr_filters(const struct net_device *dev, bool sleep)
468{
469 u64 mhash = 0;
470 u64 uhash = 0;
471 bool free = true;
472 u16 filt_idx[7];
473 const u8 *addr[7];
474 int ret, naddr = 0;
475 const struct netdev_hw_addr *ha;
476 int uc_cnt = netdev_uc_count(dev);
 477 int mc_cnt = netdev_mc_count(dev);
 478 const struct port_info *pi = netdev_priv(dev);
 479 unsigned int mb = pi->adapter->fn;
480
481 /* first do the secondary unicast addresses */
482 netdev_for_each_uc_addr(ha, dev) {
483 addr[naddr++] = ha->addr;
484 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 485 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
486 naddr, addr, filt_idx, &uhash, sleep);
487 if (ret < 0)
488 return ret;
489
490 free = false;
491 naddr = 0;
492 }
493 }
494
495 /* next set up the multicast addresses */
496 netdev_for_each_mc_addr(ha, dev) {
497 addr[naddr++] = ha->addr;
498 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 499 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
500 naddr, addr, filt_idx, &mhash, sleep);
501 if (ret < 0)
502 return ret;
503
504 free = false;
505 naddr = 0;
506 }
507 }
508
 509 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
510 uhash | mhash, sleep);
511}
512
513int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
514module_param(dbfifo_int_thresh, int, 0644);
515MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
516
517/*
518 * usecs to sleep while draining the dbfifo
519 */
520static int dbfifo_drain_delay = 1000;
521module_param(dbfifo_drain_delay, int, 0644);
522MODULE_PARM_DESC(dbfifo_drain_delay,
523 "usecs to sleep while draining the dbfifo");
524
525/*
 526 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
527 * If @mtu is -1 it is left unchanged.
528 */
529static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
530{
531 int ret;
532 struct port_info *pi = netdev_priv(dev);
533
534 ret = set_addr_filters(dev, sleep_ok);
535 if (ret == 0)
 536 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
 537 (dev->flags & IFF_PROMISC) ? 1 : 0,
 538 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
539 sleep_ok);
540 return ret;
541}
542
543static struct workqueue_struct *workq;
544
545/**
546 * link_start - enable a port
547 * @dev: the port to enable
548 *
549 * Performs the MAC and PHY actions needed to enable a port.
550 */
551static int link_start(struct net_device *dev)
552{
553 int ret;
554 struct port_info *pi = netdev_priv(dev);
 555 unsigned int mb = pi->adapter->fn;
556
557 /*
558 * We do not set address filters and promiscuity here, the stack does
559 * that step explicitly.
560 */
 561 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
 562 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
 563 if (ret == 0) {
 564 ret = t4_change_mac(pi->adapter, mb, pi->viid,
 565 pi->xact_addr_filt, dev->dev_addr, true,
 566 true);
567 if (ret >= 0) {
568 pi->xact_addr_filt = ret;
569 ret = 0;
570 }
571 }
572 if (ret == 0)
573 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
574 &pi->link_cfg);
 575 if (ret == 0)
 576 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
577 return ret;
578}
579
580/* Clear a filter and release any of its resources that we own. This also
581 * clears the filter's "pending" status.
582 */
583static void clear_filter(struct adapter *adap, struct filter_entry *f)
584{
 585 /* If the new or old filter has loopback rewriting rules then we'll
586 * need to free any existing Layer Two Table (L2T) entries of the old
587 * filter rule. The firmware will handle freeing up any Source MAC
588 * Table (SMT) entries used for rewriting Source MAC Addresses in
589 * loopback rules.
590 */
591 if (f->l2t)
592 cxgb4_l2t_release(f->l2t);
593
594 /* The zeroing of the filter rule below clears the filter valid,
595 * pending, locked flags, l2t pointer, etc. so it's all we need for
596 * this operation.
597 */
598 memset(f, 0, sizeof(*f));
599}
600
601/* Handle a filter write/deletion reply.
602 */
603static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
604{
605 unsigned int idx = GET_TID(rpl);
606 unsigned int nidx = idx - adap->tids.ftid_base;
607 unsigned int ret;
608 struct filter_entry *f;
609
610 if (idx >= adap->tids.ftid_base && nidx <
611 (adap->tids.nftids + adap->tids.nsftids)) {
612 idx = nidx;
613 ret = GET_TCB_COOKIE(rpl->cookie);
614 f = &adap->tids.ftid_tab[idx];
615
616 if (ret == FW_FILTER_WR_FLT_DELETED) {
617 /* Clear the filter when we get confirmation from the
618 * hardware that the filter has been deleted.
619 */
620 clear_filter(adap, f);
621 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
622 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
623 idx);
624 clear_filter(adap, f);
625 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
626 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
627 f->pending = 0; /* asynchronous setup completed */
628 f->valid = 1;
629 } else {
630 /* Something went wrong. Issue a warning about the
631 * problem and clear everything out.
632 */
633 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
634 idx, ret);
635 clear_filter(adap, f);
636 }
637 }
638}
639
640/* Response queue handler for the FW event queue.
641 */
642static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
643 const struct pkt_gl *gl)
644{
645 u8 opcode = ((const struct rss_header *)rsp)->opcode;
646
647 rsp++; /* skip RSS header */
648
649 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
650 */
651 if (unlikely(opcode == CPL_FW4_MSG &&
652 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
653 rsp++;
654 opcode = ((const struct rss_header *)rsp)->opcode;
655 rsp++;
656 if (opcode != CPL_SGE_EGR_UPDATE) {
657 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
658 , opcode);
659 goto out;
660 }
661 }
662
663 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
664 const struct cpl_sge_egr_update *p = (void *)rsp;
665 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
 666 struct sge_txq *txq;
 667
 668 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
 669 txq->restarts++;
 670 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
671 struct sge_eth_txq *eq;
672
673 eq = container_of(txq, struct sge_eth_txq, q);
674 netif_tx_wake_queue(eq->txq);
675 } else {
676 struct sge_ofld_txq *oq;
677
678 oq = container_of(txq, struct sge_ofld_txq, q);
679 tasklet_schedule(&oq->qresume_tsk);
680 }
681 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
682 const struct cpl_fw6_msg *p = (void *)rsp;
683
684 if (p->type == 0)
685 t4_handle_fw_rpl(q->adap, p->data);
686 } else if (opcode == CPL_L2T_WRITE_RPL) {
687 const struct cpl_l2t_write_rpl *p = (void *)rsp;
688
689 do_l2t_write_rpl(q->adap, p);
690 } else if (opcode == CPL_SET_TCB_RPL) {
691 const struct cpl_set_tcb_rpl *p = (void *)rsp;
692
693 filter_rpl(q->adap, p);
694 } else
695 dev_err(q->adap->pdev_dev,
696 "unexpected CPL %#x on FW event queue\n", opcode);
 697out:
698 return 0;
699}
700
701/**
702 * uldrx_handler - response queue handler for ULD queues
703 * @q: the response queue that received the packet
704 * @rsp: the response queue descriptor holding the offload message
705 * @gl: the gather list of packet fragments
706 *
707 * Deliver an ingress offload packet to a ULD. All processing is done by
708 * the ULD, we just maintain statistics.
709 */
710static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
711 const struct pkt_gl *gl)
712{
713 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
714
715 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
716 */
717 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
718 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
719 rsp += 2;
720
721 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
722 rxq->stats.nomem++;
723 return -1;
724 }
725 if (gl == NULL)
726 rxq->stats.imm++;
727 else if (gl == CXGB4_MSG_AN)
728 rxq->stats.an++;
729 else
730 rxq->stats.pkts++;
731 return 0;
732}
733
734static void disable_msi(struct adapter *adapter)
735{
736 if (adapter->flags & USING_MSIX) {
737 pci_disable_msix(adapter->pdev);
738 adapter->flags &= ~USING_MSIX;
739 } else if (adapter->flags & USING_MSI) {
740 pci_disable_msi(adapter->pdev);
741 adapter->flags &= ~USING_MSI;
742 }
743}
744
745/*
746 * Interrupt handler for non-data events used with MSI-X.
747 */
748static irqreturn_t t4_nondata_intr(int irq, void *cookie)
749{
750 struct adapter *adap = cookie;
751
752 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
753 if (v & PFSW) {
754 adap->swintr = 1;
755 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
756 }
757 t4_slow_intr_handler(adap);
758 return IRQ_HANDLED;
759}
760
761/*
762 * Name the MSI-X interrupts.
763 */
764static void name_msix_vecs(struct adapter *adap)
765{
 766 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
767
768 /* non-data interrupts */
 769 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
770
771 /* FW events */
772 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
773 adap->port[0]->name);
774
775 /* Ethernet queues */
776 for_each_port(adap, j) {
777 struct net_device *d = adap->port[j];
778 const struct port_info *pi = netdev_priv(d);
779
 780 for (i = 0; i < pi->nqsets; i++, msi_idx++)
781 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
782 d->name, i);
783 }
784
785 /* offload queues */
786 for_each_ofldrxq(&adap->sge, i)
787 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
 788 adap->port[0]->name, i);
789
790 for_each_rdmarxq(&adap->sge, i)
791 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
 792 adap->port[0]->name, i);
793}
794
795static int request_msix_queue_irqs(struct adapter *adap)
796{
797 struct sge *s = &adap->sge;
 798 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
799
800 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
801 adap->msix_info[1].desc, &s->fw_evtq);
802 if (err)
803 return err;
804
805 for_each_ethrxq(s, ethqidx) {
806 err = request_irq(adap->msix_info[msi_index].vec,
807 t4_sge_intr_msix, 0,
808 adap->msix_info[msi_index].desc,
809 &s->ethrxq[ethqidx].rspq);
810 if (err)
811 goto unwind;
 812 msi_index++;
813 }
814 for_each_ofldrxq(s, ofldqidx) {
815 err = request_irq(adap->msix_info[msi_index].vec,
816 t4_sge_intr_msix, 0,
817 adap->msix_info[msi_index].desc,
818 &s->ofldrxq[ofldqidx].rspq);
819 if (err)
820 goto unwind;
 821 msi_index++;
822 }
823 for_each_rdmarxq(s, rdmaqidx) {
824 err = request_irq(adap->msix_info[msi_index].vec,
825 t4_sge_intr_msix, 0,
826 adap->msix_info[msi_index].desc,
827 &s->rdmarxq[rdmaqidx].rspq);
828 if (err)
829 goto unwind;
 830 msi_index++;
831 }
832 return 0;
833
834unwind:
835 while (--rdmaqidx >= 0)
 836 free_irq(adap->msix_info[--msi_index].vec,
837 &s->rdmarxq[rdmaqidx].rspq);
838 while (--ofldqidx >= 0)
 839 free_irq(adap->msix_info[--msi_index].vec,
840 &s->ofldrxq[ofldqidx].rspq);
841 while (--ethqidx >= 0)
842 free_irq(adap->msix_info[--msi_index].vec,
843 &s->ethrxq[ethqidx].rspq);
844 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
845 return err;
846}
847
848static void free_msix_queue_irqs(struct adapter *adap)
849{
 850 int i, msi_index = 2;
851 struct sge *s = &adap->sge;
852
853 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
854 for_each_ethrxq(s, i)
 855 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
 856 for_each_ofldrxq(s, i)
 857 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
 858 for_each_rdmarxq(s, i)
 859 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
860}
861
862/**
863 * write_rss - write the RSS table for a given port
864 * @pi: the port
865 * @queues: array of queue indices for RSS
866 *
867 * Sets up the portion of the HW RSS table for the port's VI to distribute
868 * packets to the Rx queues in @queues.
869 */
870static int write_rss(const struct port_info *pi, const u16 *queues)
871{
872 u16 *rss;
873 int i, err;
874 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
875
876 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
877 if (!rss)
878 return -ENOMEM;
879
880 /* map the queue indices to queue ids */
881 for (i = 0; i < pi->rss_size; i++, queues++)
882 rss[i] = q[*queues].rspq.abs_id;
883
884 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
885 pi->rss_size, rss, pi->rss_size);
886 kfree(rss);
887 return err;
888}
889
890/**
891 * setup_rss - configure RSS
892 * @adap: the adapter
893 *
 894 * Sets up RSS for each port.
895 */
896static int setup_rss(struct adapter *adap)
897{
 898 int i, err;
899
900 for_each_port(adap, i) {
901 const struct port_info *pi = adap2pinfo(adap, i);
 902
 903 err = write_rss(pi, pi->rss);
904 if (err)
905 return err;
906 }
907 return 0;
908}
909
910/*
911 * Return the channel of the ingress queue with the given qid.
912 */
913static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
914{
915 qid -= p->ingr_start;
916 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
917}
918
919/*
920 * Wait until all NAPI handlers are descheduled.
921 */
922static void quiesce_rx(struct adapter *adap)
923{
924 int i;
925
926 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
927 struct sge_rspq *q = adap->sge.ingr_map[i];
928
929 if (q && q->handler)
930 napi_disable(&q->napi);
931 }
932}
933
934/*
935 * Enable NAPI scheduling and interrupt generation for all Rx queues.
936 */
937static void enable_rx(struct adapter *adap)
938{
939 int i;
940
941 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
942 struct sge_rspq *q = adap->sge.ingr_map[i];
943
944 if (!q)
945 continue;
946 if (q->handler)
947 napi_enable(&q->napi);
948 /* 0-increment GTS to start the timer and enable interrupts */
949 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
950 SEINTARM(q->intr_params) |
951 INGRESSQID(q->cntxt_id));
952 }
953}
954
955/**
956 * setup_sge_queues - configure SGE Tx/Rx/response queues
957 * @adap: the adapter
958 *
959 * Determines how many sets of SGE queues to use and initializes them.
960 * We support multiple queue sets per port if we have MSI-X, otherwise
961 * just one queue set per port.
962 */
963static int setup_sge_queues(struct adapter *adap)
964{
965 int err, msi_idx, i, j;
966 struct sge *s = &adap->sge;
967
968 bitmap_zero(s->starving_fl, MAX_EGRQ);
969 bitmap_zero(s->txq_maperr, MAX_EGRQ);
970
971 if (adap->flags & USING_MSIX)
972 msi_idx = 1; /* vector 0 is for non-queue interrupts */
973 else {
974 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
975 NULL, NULL);
976 if (err)
977 return err;
978 msi_idx = -((int)s->intrq.abs_id + 1);
979 }
980
981 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
982 msi_idx, NULL, fwevtq_handler);
983 if (err) {
984freeout: t4_free_sge_resources(adap);
985 return err;
986 }
987
988 for_each_port(adap, i) {
989 struct net_device *dev = adap->port[i];
990 struct port_info *pi = netdev_priv(dev);
991 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
992 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
993
994 for (j = 0; j < pi->nqsets; j++, q++) {
995 if (msi_idx > 0)
996 msi_idx++;
997 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
998 msi_idx, &q->fl,
999 t4_ethrx_handler);
1000 if (err)
1001 goto freeout;
1002 q->rspq.idx = j;
1003 memset(&q->stats, 0, sizeof(q->stats));
1004 }
1005 for (j = 0; j < pi->nqsets; j++, t++) {
1006 err = t4_sge_alloc_eth_txq(adap, t, dev,
1007 netdev_get_tx_queue(dev, j),
1008 s->fw_evtq.cntxt_id);
1009 if (err)
1010 goto freeout;
1011 }
1012 }
1013
1014 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1015 for_each_ofldrxq(s, i) {
1016 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1017 struct net_device *dev = adap->port[i / j];
1018
1019 if (msi_idx > 0)
1020 msi_idx++;
1021 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1022 &q->fl, uldrx_handler);
1023 if (err)
1024 goto freeout;
1025 memset(&q->stats, 0, sizeof(q->stats));
1026 s->ofld_rxq[i] = q->rspq.abs_id;
1027 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1028 s->fw_evtq.cntxt_id);
1029 if (err)
1030 goto freeout;
1031 }
1032
1033 for_each_rdmarxq(s, i) {
1034 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1035
1036 if (msi_idx > 0)
1037 msi_idx++;
1038 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1039 msi_idx, &q->fl, uldrx_handler);
1040 if (err)
1041 goto freeout;
1042 memset(&q->stats, 0, sizeof(q->stats));
1043 s->rdma_rxq[i] = q->rspq.abs_id;
1044 }
1045
1046 for_each_port(adap, i) {
1047 /*
1048 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1049 * have RDMA queues, and that's the right value.
1050 */
1051 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1052 s->fw_evtq.cntxt_id,
1053 s->rdmarxq[i].rspq.cntxt_id);
1054 if (err)
1055 goto freeout;
1056 }
1057
1058 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
1059 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1060 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1061 return 0;
1062}
1063
1064/*
1065 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
1066 * started but failed, and a negative errno if flash load couldn't start.
1067 */
1068static int upgrade_fw(struct adapter *adap)
1069{
1070 int ret;
 1071 u32 vers, exp_major;
1072 const struct fw_hdr *hdr;
1073 const struct firmware *fw;
1074 struct device *dev = adap->pdev_dev;
 1075 char *fw_file_name;
 1076
1077 switch (CHELSIO_CHIP_VERSION(adap->chip)) {
1078 case CHELSIO_T4:
1079 fw_file_name = FW_FNAME;
1080 exp_major = FW_VERSION_MAJOR;
1081 break;
1082 case CHELSIO_T5:
1083 fw_file_name = FW5_FNAME;
1084 exp_major = FW_VERSION_MAJOR_T5;
1085 break;
1086 default:
1087 dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
1088 return -EINVAL;
1089 }
1090
1091 ret = request_firmware(&fw, fw_file_name, dev);
 1092 if (ret < 0) {
1093 dev_err(dev, "unable to load firmware image %s, error %d\n",
1094 fw_file_name, ret);
1095 return ret;
1096 }
1097
1098 hdr = (const struct fw_hdr *)fw->data;
1099 vers = ntohl(hdr->fw_ver);
 1100 if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
1101 ret = -EINVAL; /* wrong major version, won't do */
1102 goto out;
1103 }
1104
1105 /*
1106 * If the flash FW is unusable or we found something newer, load it.
1107 */
 1108 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
 1109 vers > adap->params.fw_vers) {
1110 dev_info(dev, "upgrading firmware ...\n");
1111 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1112 /*force=*/false);
 1113 if (!ret)
1114 dev_info(dev,
1115 "firmware upgraded to version %pI4 from %s\n",
1116 &hdr->fw_ver, fw_file_name);
1117 else
1118 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1119 } else {
1120 /*
1121 * Tell our caller that we didn't upgrade the firmware.
1122 */
1123 ret = -EINVAL;
 1124 }
 1125
1126out: release_firmware(fw);
1127 return ret;
1128}
1129
1130/*
1131 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1132 * The allocated memory is cleared.
1133 */
1134void *t4_alloc_mem(size_t size)
1135{
 1136 void *p = kzalloc(size, GFP_KERNEL);
1137
1138 if (!p)
 1139 p = vzalloc(size);
1140 return p;
1141}
1142
1143/*
1144 * Free memory allocated through alloc_mem().
1145 */
 1146static void t4_free_mem(void *addr)
1147{
1148 if (is_vmalloc_addr(addr))
1149 vfree(addr);
1150 else
1151 kfree(addr);
1152}
1153
1154/* Send a Work Request to write the filter at a specified index. We construct
1155 * a Firmware Filter Work Request to have the work done and put the indicated
1156 * filter into "pending" mode which will prevent any further actions against
1157 * it till we get a reply from the firmware on the completion status of the
1158 * request.
1159 */
1160static int set_filter_wr(struct adapter *adapter, int fidx)
1161{
1162 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1163 struct sk_buff *skb;
1164 struct fw_filter_wr *fwr;
1165 unsigned int ftid;
1166
1167 /* If the new filter requires loopback Destination MAC and/or VLAN
1168 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1169 * the filter.
1170 */
1171 if (f->fs.newdmac || f->fs.newvlan) {
1172 /* allocate L2T entry for new filter */
1173 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1174 if (f->l2t == NULL)
1175 return -EAGAIN;
1176 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1177 f->fs.eport, f->fs.dmac)) {
1178 cxgb4_l2t_release(f->l2t);
1179 f->l2t = NULL;
1180 return -ENOMEM;
1181 }
1182 }
1183
1184 ftid = adapter->tids.ftid_base + fidx;
1185
1186 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1187 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1188 memset(fwr, 0, sizeof(*fwr));
1189
1190 /* It would be nice to put most of the following in t4_hw.c but most
1191 * of the work is translating the cxgbtool ch_filter_specification
1192 * into the Work Request and the definition of that structure is
1193 * currently in cxgbtool.h which isn't appropriate to pull into the
1194 * common code. We may eventually try to come up with a more neutral
1195 * filter specification structure but for now it's easiest to simply
1196 * put this fairly direct code in line ...
1197 */
1198 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1199 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1200 fwr->tid_to_iq =
1201 htonl(V_FW_FILTER_WR_TID(ftid) |
1202 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1203 V_FW_FILTER_WR_NOREPLY(0) |
1204 V_FW_FILTER_WR_IQ(f->fs.iq));
1205 fwr->del_filter_to_l2tix =
1206 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1207 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1208 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1209 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1210 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1211 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1212 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1213 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1214 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1215 f->fs.newvlan == VLAN_REWRITE) |
1216 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1217 f->fs.newvlan == VLAN_REWRITE) |
1218 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1219 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1220 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1221 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1222 fwr->ethtype = htons(f->fs.val.ethtype);
1223 fwr->ethtypem = htons(f->fs.mask.ethtype);
1224 fwr->frag_to_ovlan_vldm =
1225 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1226 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1227 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1228 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1229 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1230 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1231 fwr->smac_sel = 0;
1232 fwr->rx_chan_rx_rpl_iq =
1233 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1234 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1235 fwr->maci_to_matchtypem =
1236 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1237 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1238 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1239 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1240 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1241 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1242 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1243 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1244 fwr->ptcl = f->fs.val.proto;
1245 fwr->ptclm = f->fs.mask.proto;
1246 fwr->ttyp = f->fs.val.tos;
1247 fwr->ttypm = f->fs.mask.tos;
1248 fwr->ivlan = htons(f->fs.val.ivlan);
1249 fwr->ivlanm = htons(f->fs.mask.ivlan);
1250 fwr->ovlan = htons(f->fs.val.ovlan);
1251 fwr->ovlanm = htons(f->fs.mask.ovlan);
1252 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1253 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1254 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1255 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1256 fwr->lp = htons(f->fs.val.lport);
1257 fwr->lpm = htons(f->fs.mask.lport);
1258 fwr->fp = htons(f->fs.val.fport);
1259 fwr->fpm = htons(f->fs.mask.fport);
1260 if (f->fs.newsmac)
1261 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1262
1263 /* Mark the filter as "pending" and ship off the Filter Work Request.
1264 * When we get the Work Request Reply we'll clear the pending status.
1265 */
1266 f->pending = 1;
1267 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1268 t4_ofld_send(adapter, skb);
1269 return 0;
1270}
1271
1272/* Delete the filter at a specified index.
1273 */
1274static int del_filter_wr(struct adapter *adapter, int fidx)
1275{
1276 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1277 struct sk_buff *skb;
1278 struct fw_filter_wr *fwr;
1279 unsigned int len, ftid;
1280
1281 len = sizeof(*fwr);
1282 ftid = adapter->tids.ftid_base + fidx;
1283
1284 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1285 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1286 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1287
1288 /* Mark the filter as "pending" and ship off the Filter Work Request.
1289 * When we get the Work Request Reply we'll clear the pending status.
1290 */
1291 f->pending = 1;
1292 t4_mgmt_tx(adapter, skb);
1293 return 0;
1294}
1295
1296static inline int is_offload(const struct adapter *adap)
1297{
1298 return adap->params.offload;
1299}
1300
1301/*
1302 * Implementation of ethtool operations.
1303 */
1304
1305static u32 get_msglevel(struct net_device *dev)
1306{
1307 return netdev2adap(dev)->msg_enable;
1308}
1309
1310static void set_msglevel(struct net_device *dev, u32 val)
1311{
1312 netdev2adap(dev)->msg_enable = val;
1313}
1314
1315static char stats_strings[][ETH_GSTRING_LEN] = {
1316 "TxOctetsOK ",
1317 "TxFramesOK ",
1318 "TxBroadcastFrames ",
1319 "TxMulticastFrames ",
1320 "TxUnicastFrames ",
1321 "TxErrorFrames ",
1322
1323 "TxFrames64 ",
1324 "TxFrames65To127 ",
1325 "TxFrames128To255 ",
1326 "TxFrames256To511 ",
1327 "TxFrames512To1023 ",
1328 "TxFrames1024To1518 ",
1329 "TxFrames1519ToMax ",
1330
1331 "TxFramesDropped ",
1332 "TxPauseFrames ",
1333 "TxPPP0Frames ",
1334 "TxPPP1Frames ",
1335 "TxPPP2Frames ",
1336 "TxPPP3Frames ",
1337 "TxPPP4Frames ",
1338 "TxPPP5Frames ",
1339 "TxPPP6Frames ",
1340 "TxPPP7Frames ",
1341
1342 "RxOctetsOK ",
1343 "RxFramesOK ",
1344 "RxBroadcastFrames ",
1345 "RxMulticastFrames ",
1346 "RxUnicastFrames ",
1347
1348 "RxFramesTooLong ",
1349 "RxJabberErrors ",
1350 "RxFCSErrors ",
1351 "RxLengthErrors ",
1352 "RxSymbolErrors ",
1353 "RxRuntFrames ",
1354
1355 "RxFrames64 ",
1356 "RxFrames65To127 ",
1357 "RxFrames128To255 ",
1358 "RxFrames256To511 ",
1359 "RxFrames512To1023 ",
1360 "RxFrames1024To1518 ",
1361 "RxFrames1519ToMax ",
1362
1363 "RxPauseFrames ",
1364 "RxPPP0Frames ",
1365 "RxPPP1Frames ",
1366 "RxPPP2Frames ",
1367 "RxPPP3Frames ",
1368 "RxPPP4Frames ",
1369 "RxPPP5Frames ",
1370 "RxPPP6Frames ",
1371 "RxPPP7Frames ",
1372
1373 "RxBG0FramesDropped ",
1374 "RxBG1FramesDropped ",
1375 "RxBG2FramesDropped ",
1376 "RxBG3FramesDropped ",
1377 "RxBG0FramesTrunc ",
1378 "RxBG1FramesTrunc ",
1379 "RxBG2FramesTrunc ",
1380 "RxBG3FramesTrunc ",
1381
1382 "TSO ",
1383 "TxCsumOffload ",
1384 "RxCsumGood ",
1385 "VLANextractions ",
1386 "VLANinsertions ",
1387 "GROpackets ",
1388 "GROmerged ",
1389 "WriteCoalSuccess ",
1390 "WriteCoalFail ",
1391};
1392
1393static int get_sset_count(struct net_device *dev, int sset)
1394{
1395 switch (sset) {
1396 case ETH_SS_STATS:
1397 return ARRAY_SIZE(stats_strings);
1398 default:
1399 return -EOPNOTSUPP;
1400 }
1401}
1402
1403#define T4_REGMAP_SIZE (160 * 1024)
 1404#define T5_REGMAP_SIZE (332 * 1024)
1405
1406static int get_regs_len(struct net_device *dev)
1407{
1408 struct adapter *adap = netdev2adap(dev);
1409 if (is_t4(adap->chip))
1410 return T4_REGMAP_SIZE;
1411 else
1412 return T5_REGMAP_SIZE;
1413}
1414
1415static int get_eeprom_len(struct net_device *dev)
1416{
1417 return EEPROMSIZE;
1418}
1419
1420static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1421{
1422 struct adapter *adapter = netdev2adap(dev);
1423
1424 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1425 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1426 strlcpy(info->bus_info, pci_name(adapter->pdev),
1427 sizeof(info->bus_info));
 1428
 1429 if (adapter->params.fw_vers)
1430 snprintf(info->fw_version, sizeof(info->fw_version),
1431 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1432 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1433 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1434 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1435 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1436 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1437 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1438 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1439 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1440}
1441
1442static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1443{
1444 if (stringset == ETH_SS_STATS)
1445 memcpy(data, stats_strings, sizeof(stats_strings));
1446}
1447
1448/*
1449 * port stats maintained per queue of the port. They should be in the same
1450 * order as in stats_strings above.
1451 */
1452struct queue_port_stats {
1453 u64 tso;
1454 u64 tx_csum;
1455 u64 rx_csum;
1456 u64 vlan_ex;
1457 u64 vlan_ins;
1458 u64 gro_pkts;
1459 u64 gro_merged;
1460};
1461
1462static void collect_sge_port_stats(const struct adapter *adap,
1463 const struct port_info *p, struct queue_port_stats *s)
1464{
1465 int i;
1466 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1467 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1468
1469 memset(s, 0, sizeof(*s));
1470 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1471 s->tso += tx->tso;
1472 s->tx_csum += tx->tx_cso;
1473 s->rx_csum += rx->stats.rx_cso;
1474 s->vlan_ex += rx->stats.vlan_ex;
1475 s->vlan_ins += tx->vlan_ins;
1476 s->gro_pkts += rx->stats.lro_pkts;
1477 s->gro_merged += rx->stats.lro_merged;
1478 }
1479}
1480
1481static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1482 u64 *data)
1483{
1484 struct port_info *pi = netdev_priv(dev);
1485 struct adapter *adapter = pi->adapter;
 1486 u32 val1, val2;
1487
1488 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1489
1490 data += sizeof(struct port_stats) / sizeof(u64);
1491 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1492 data += sizeof(struct queue_port_stats) / sizeof(u64);
1493 if (!is_t4(adapter->chip)) {
1494 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1495 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1496 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1497 *data = val1 - val2;
1498 data++;
1499 *data = val2;
1500 data++;
1501 } else {
1502 memset(data, 0, 2 * sizeof(u64));
1503 *data += 2;
1504 }
1505}
1506
1507/*
1508 * Return a version number to identify the type of adapter. The scheme is:
1509 * - bits 0..9: chip version
1510 * - bits 10..15: chip revision
 1511 * - bits 16..23: register dump version
1512 */
1513static inline unsigned int mk_adap_vers(const struct adapter *ap)
1514{
1515 return CHELSIO_CHIP_VERSION(ap->chip) |
1516 (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
1517}
1518
1519static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1520 unsigned int end)
1521{
1522 u32 *p = buf + start;
1523
1524 for ( ; start <= end; start += sizeof(u32))
1525 *p++ = t4_read_reg(ap, start);
1526}
1527
1528static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1529 void *buf)
1530{
 1531 static const unsigned int t4_reg_ranges[] = {
1532 0x1008, 0x1108,
1533 0x1180, 0x11b4,
1534 0x11fc, 0x123c,
1535 0x1300, 0x173c,
1536 0x1800, 0x18fc,
1537 0x3000, 0x30d8,
1538 0x30e0, 0x5924,
1539 0x5960, 0x59d4,
1540 0x5a00, 0x5af8,
1541 0x6000, 0x6098,
1542 0x6100, 0x6150,
1543 0x6200, 0x6208,
1544 0x6240, 0x6248,
1545 0x6280, 0x6338,
1546 0x6370, 0x638c,
1547 0x6400, 0x643c,
1548 0x6500, 0x6524,
1549 0x6a00, 0x6a38,
1550 0x6a60, 0x6a78,
1551 0x6b00, 0x6b84,
1552 0x6bf0, 0x6c84,
1553 0x6cf0, 0x6d84,
1554 0x6df0, 0x6e84,
1555 0x6ef0, 0x6f84,
1556 0x6ff0, 0x7084,
1557 0x70f0, 0x7184,
1558 0x71f0, 0x7284,
1559 0x72f0, 0x7384,
1560 0x73f0, 0x7450,
1561 0x7500, 0x7530,
1562 0x7600, 0x761c,
1563 0x7680, 0x76cc,
1564 0x7700, 0x7798,
1565 0x77c0, 0x77fc,
1566 0x7900, 0x79fc,
1567 0x7b00, 0x7c38,
1568 0x7d00, 0x7efc,
1569 0x8dc0, 0x8e1c,
1570 0x8e30, 0x8e78,
1571 0x8ea0, 0x8f6c,
1572 0x8fc0, 0x9074,
1573 0x90fc, 0x90fc,
1574 0x9400, 0x9458,
1575 0x9600, 0x96bc,
1576 0x9800, 0x9808,
1577 0x9820, 0x983c,
1578 0x9850, 0x9864,
1579 0x9c00, 0x9c6c,
1580 0x9c80, 0x9cec,
1581 0x9d00, 0x9d6c,
1582 0x9d80, 0x9dec,
1583 0x9e00, 0x9e6c,
1584 0x9e80, 0x9eec,
1585 0x9f00, 0x9f6c,
1586 0x9f80, 0x9fec,
1587 0xd004, 0xd03c,
1588 0xdfc0, 0xdfe0,
1589 0xe000, 0xea7c,
1590 0xf000, 0x11190,
1591 0x19040, 0x1906c,
1592 0x19078, 0x19080,
1593 0x1908c, 0x19124,
1594 0x19150, 0x191b0,
1595 0x191d0, 0x191e8,
1596 0x19238, 0x1924c,
1597 0x193f8, 0x19474,
1598 0x19490, 0x194f8,
1599 0x19800, 0x19f30,
1600 0x1a000, 0x1a06c,
1601 0x1a0b0, 0x1a120,
1602 0x1a128, 0x1a138,
1603 0x1a190, 0x1a1c4,
1604 0x1a1fc, 0x1a1fc,
1605 0x1e040, 0x1e04c,
 1606 0x1e284, 0x1e28c,
1607 0x1e2c0, 0x1e2c0,
1608 0x1e2e0, 0x1e2e0,
1609 0x1e300, 0x1e384,
1610 0x1e3c0, 0x1e3c8,
1611 0x1e440, 0x1e44c,
 1612 0x1e684, 0x1e68c,
1613 0x1e6c0, 0x1e6c0,
1614 0x1e6e0, 0x1e6e0,
1615 0x1e700, 0x1e784,
1616 0x1e7c0, 0x1e7c8,
1617 0x1e840, 0x1e84c,
 1618 0x1ea84, 0x1ea8c,
1619 0x1eac0, 0x1eac0,
1620 0x1eae0, 0x1eae0,
1621 0x1eb00, 0x1eb84,
1622 0x1ebc0, 0x1ebc8,
1623 0x1ec40, 0x1ec4c,
 1624 0x1ee84, 0x1ee8c,
1625 0x1eec0, 0x1eec0,
1626 0x1eee0, 0x1eee0,
1627 0x1ef00, 0x1ef84,
1628 0x1efc0, 0x1efc8,
1629 0x1f040, 0x1f04c,
 1630 0x1f284, 0x1f28c,
1631 0x1f2c0, 0x1f2c0,
1632 0x1f2e0, 0x1f2e0,
1633 0x1f300, 0x1f384,
1634 0x1f3c0, 0x1f3c8,
1635 0x1f440, 0x1f44c,
 1636 0x1f684, 0x1f68c,
1637 0x1f6c0, 0x1f6c0,
1638 0x1f6e0, 0x1f6e0,
1639 0x1f700, 0x1f784,
1640 0x1f7c0, 0x1f7c8,
1641 0x1f840, 0x1f84c,
 1642 0x1fa84, 0x1fa8c,
1643 0x1fac0, 0x1fac0,
1644 0x1fae0, 0x1fae0,
1645 0x1fb00, 0x1fb84,
1646 0x1fbc0, 0x1fbc8,
1647 0x1fc40, 0x1fc4c,
 1648 0x1fe84, 0x1fe8c,
1649 0x1fec0, 0x1fec0,
1650 0x1fee0, 0x1fee0,
1651 0x1ff00, 0x1ff84,
1652 0x1ffc0, 0x1ffc8,
1653 0x20000, 0x2002c,
1654 0x20100, 0x2013c,
1655 0x20190, 0x201c8,
1656 0x20200, 0x20318,
1657 0x20400, 0x20528,
1658 0x20540, 0x20614,
1659 0x21000, 0x21040,
1660 0x2104c, 0x21060,
1661 0x210c0, 0x210ec,
1662 0x21200, 0x21268,
1663 0x21270, 0x21284,
1664 0x212fc, 0x21388,
1665 0x21400, 0x21404,
1666 0x21500, 0x21518,
1667 0x2152c, 0x2153c,
1668 0x21550, 0x21554,
1669 0x21600, 0x21600,
1670 0x21608, 0x21628,
1671 0x21630, 0x2163c,
1672 0x21700, 0x2171c,
1673 0x21780, 0x2178c,
1674 0x21800, 0x21c38,
1675 0x21c80, 0x21d7c,
1676 0x21e00, 0x21e04,
1677 0x22000, 0x2202c,
1678 0x22100, 0x2213c,
1679 0x22190, 0x221c8,
1680 0x22200, 0x22318,
1681 0x22400, 0x22528,
1682 0x22540, 0x22614,
1683 0x23000, 0x23040,
1684 0x2304c, 0x23060,
1685 0x230c0, 0x230ec,
1686 0x23200, 0x23268,
1687 0x23270, 0x23284,
1688 0x232fc, 0x23388,
1689 0x23400, 0x23404,
1690 0x23500, 0x23518,
1691 0x2352c, 0x2353c,
1692 0x23550, 0x23554,
1693 0x23600, 0x23600,
1694 0x23608, 0x23628,
1695 0x23630, 0x2363c,
1696 0x23700, 0x2371c,
1697 0x23780, 0x2378c,
1698 0x23800, 0x23c38,
1699 0x23c80, 0x23d7c,
1700 0x23e00, 0x23e04,
1701 0x24000, 0x2402c,
1702 0x24100, 0x2413c,
1703 0x24190, 0x241c8,
1704 0x24200, 0x24318,
1705 0x24400, 0x24528,
1706 0x24540, 0x24614,
1707 0x25000, 0x25040,
1708 0x2504c, 0x25060,
1709 0x250c0, 0x250ec,
1710 0x25200, 0x25268,
1711 0x25270, 0x25284,
1712 0x252fc, 0x25388,
1713 0x25400, 0x25404,
1714 0x25500, 0x25518,
1715 0x2552c, 0x2553c,
1716 0x25550, 0x25554,
1717 0x25600, 0x25600,
1718 0x25608, 0x25628,
1719 0x25630, 0x2563c,
1720 0x25700, 0x2571c,
1721 0x25780, 0x2578c,
1722 0x25800, 0x25c38,
1723 0x25c80, 0x25d7c,
1724 0x25e00, 0x25e04,
1725 0x26000, 0x2602c,
1726 0x26100, 0x2613c,
1727 0x26190, 0x261c8,
1728 0x26200, 0x26318,
1729 0x26400, 0x26528,
1730 0x26540, 0x26614,
1731 0x27000, 0x27040,
1732 0x2704c, 0x27060,
1733 0x270c0, 0x270ec,
1734 0x27200, 0x27268,
1735 0x27270, 0x27284,
1736 0x272fc, 0x27388,
1737 0x27400, 0x27404,
1738 0x27500, 0x27518,
1739 0x2752c, 0x2753c,
1740 0x27550, 0x27554,
1741 0x27600, 0x27600,
1742 0x27608, 0x27628,
1743 0x27630, 0x2763c,
1744 0x27700, 0x2771c,
1745 0x27780, 0x2778c,
1746 0x27800, 0x27c38,
1747 0x27c80, 0x27d7c,
1748 0x27e00, 0x27e04
1749 };
1750
1751 static const unsigned int t5_reg_ranges[] = {
1752 0x1008, 0x1148,
1753 0x1180, 0x11b4,
1754 0x11fc, 0x123c,
1755 0x1280, 0x173c,
1756 0x1800, 0x18fc,
1757 0x3000, 0x3028,
1758 0x3060, 0x30d8,
1759 0x30e0, 0x30fc,
1760 0x3140, 0x357c,
1761 0x35a8, 0x35cc,
1762 0x35ec, 0x35ec,
1763 0x3600, 0x5624,
1764 0x56cc, 0x575c,
1765 0x580c, 0x5814,
1766 0x5890, 0x58bc,
1767 0x5940, 0x59dc,
1768 0x59fc, 0x5a18,
1769 0x5a60, 0x5a9c,
1770 0x5b9c, 0x5bfc,
1771 0x6000, 0x6040,
1772 0x6058, 0x614c,
1773 0x7700, 0x7798,
1774 0x77c0, 0x78fc,
1775 0x7b00, 0x7c54,
1776 0x7d00, 0x7efc,
1777 0x8dc0, 0x8de0,
1778 0x8df8, 0x8e84,
1779 0x8ea0, 0x8f84,
1780 0x8fc0, 0x90f8,
1781 0x9400, 0x9470,
1782 0x9600, 0x96f4,
1783 0x9800, 0x9808,
1784 0x9820, 0x983c,
1785 0x9850, 0x9864,
1786 0x9c00, 0x9c6c,
1787 0x9c80, 0x9cec,
1788 0x9d00, 0x9d6c,
1789 0x9d80, 0x9dec,
1790 0x9e00, 0x9e6c,
1791 0x9e80, 0x9eec,
1792 0x9f00, 0x9f6c,
1793 0x9f80, 0xa020,
1794 0xd004, 0xd03c,
1795 0xdfc0, 0xdfe0,
1796 0xe000, 0x11088,
1797 0x1109c, 0x1117c,
1798 0x11190, 0x11204,
1799 0x19040, 0x1906c,
1800 0x19078, 0x19080,
1801 0x1908c, 0x19124,
1802 0x19150, 0x191b0,
1803 0x191d0, 0x191e8,
1804 0x19238, 0x19290,
1805 0x193f8, 0x19474,
1806 0x19490, 0x194cc,
1807 0x194f0, 0x194f8,
1808 0x19c00, 0x19c60,
1809 0x19c94, 0x19e10,
1810 0x19e50, 0x19f34,
1811 0x19f40, 0x19f50,
1812 0x19f90, 0x19fe4,
1813 0x1a000, 0x1a06c,
1814 0x1a0b0, 0x1a120,
1815 0x1a128, 0x1a138,
1816 0x1a190, 0x1a1c4,
1817 0x1a1fc, 0x1a1fc,
1818 0x1e008, 0x1e00c,
1819 0x1e040, 0x1e04c,
1820 0x1e284, 0x1e290,
1821 0x1e2c0, 0x1e2c0,
1822 0x1e2e0, 0x1e2e0,
1823 0x1e300, 0x1e384,
1824 0x1e3c0, 0x1e3c8,
1825 0x1e408, 0x1e40c,
1826 0x1e440, 0x1e44c,
1827 0x1e684, 0x1e690,
1828 0x1e6c0, 0x1e6c0,
1829 0x1e6e0, 0x1e6e0,
1830 0x1e700, 0x1e784,
1831 0x1e7c0, 0x1e7c8,
1832 0x1e808, 0x1e80c,
1833 0x1e840, 0x1e84c,
1834 0x1ea84, 0x1ea90,
1835 0x1eac0, 0x1eac0,
1836 0x1eae0, 0x1eae0,
1837 0x1eb00, 0x1eb84,
1838 0x1ebc0, 0x1ebc8,
1839 0x1ec08, 0x1ec0c,
1840 0x1ec40, 0x1ec4c,
1841 0x1ee84, 0x1ee90,
1842 0x1eec0, 0x1eec0,
1843 0x1eee0, 0x1eee0,
1844 0x1ef00, 0x1ef84,
1845 0x1efc0, 0x1efc8,
1846 0x1f008, 0x1f00c,
1847 0x1f040, 0x1f04c,
1848 0x1f284, 0x1f290,
1849 0x1f2c0, 0x1f2c0,
1850 0x1f2e0, 0x1f2e0,
1851 0x1f300, 0x1f384,
1852 0x1f3c0, 0x1f3c8,
1853 0x1f408, 0x1f40c,
1854 0x1f440, 0x1f44c,
1855 0x1f684, 0x1f690,
1856 0x1f6c0, 0x1f6c0,
1857 0x1f6e0, 0x1f6e0,
1858 0x1f700, 0x1f784,
1859 0x1f7c0, 0x1f7c8,
1860 0x1f808, 0x1f80c,
1861 0x1f840, 0x1f84c,
1862 0x1fa84, 0x1fa90,
1863 0x1fac0, 0x1fac0,
1864 0x1fae0, 0x1fae0,
1865 0x1fb00, 0x1fb84,
1866 0x1fbc0, 0x1fbc8,
1867 0x1fc08, 0x1fc0c,
1868 0x1fc40, 0x1fc4c,
1869 0x1fe84, 0x1fe90,
1870 0x1fec0, 0x1fec0,
1871 0x1fee0, 0x1fee0,
1872 0x1ff00, 0x1ff84,
1873 0x1ffc0, 0x1ffc8,
1874 0x30000, 0x30030,
1875 0x30100, 0x30144,
1876 0x30190, 0x301d0,
1877 0x30200, 0x30318,
1878 0x30400, 0x3052c,
1879 0x30540, 0x3061c,
1880 0x30800, 0x30834,
1881 0x308c0, 0x30908,
1882 0x30910, 0x309ac,
1883 0x30a00, 0x30a04,
1884 0x30a0c, 0x30a2c,
1885 0x30a44, 0x30a50,
1886 0x30a74, 0x30c24,
1887 0x30d08, 0x30d14,
1888 0x30d1c, 0x30d20,
1889 0x30d3c, 0x30d50,
1890 0x31200, 0x3120c,
1891 0x31220, 0x31220,
1892 0x31240, 0x31240,
1893 0x31600, 0x31600,
1894 0x31608, 0x3160c,
1895 0x31a00, 0x31a1c,
1896 0x31e04, 0x31e20,
1897 0x31e38, 0x31e3c,
1898 0x31e80, 0x31e80,
1899 0x31e88, 0x31ea8,
1900 0x31eb0, 0x31eb4,
1901 0x31ec8, 0x31ed4,
1902 0x31fb8, 0x32004,
1903 0x32208, 0x3223c,
1904 0x32600, 0x32630,
1905 0x32a00, 0x32abc,
1906 0x32b00, 0x32b70,
1907 0x33000, 0x33048,
1908 0x33060, 0x3309c,
1909 0x330f0, 0x33148,
1910 0x33160, 0x3319c,
1911 0x331f0, 0x332e4,
1912 0x332f8, 0x333e4,
1913 0x333f8, 0x33448,
1914 0x33460, 0x3349c,
1915 0x334f0, 0x33548,
1916 0x33560, 0x3359c,
1917 0x335f0, 0x336e4,
1918 0x336f8, 0x337e4,
1919 0x337f8, 0x337fc,
1920 0x33814, 0x33814,
1921 0x3382c, 0x3382c,
1922 0x33880, 0x3388c,
1923 0x338e8, 0x338ec,
1924 0x33900, 0x33948,
1925 0x33960, 0x3399c,
1926 0x339f0, 0x33ae4,
1927 0x33af8, 0x33b10,
1928 0x33b28, 0x33b28,
1929 0x33b3c, 0x33b50,
1930 0x33bf0, 0x33c10,
1931 0x33c28, 0x33c28,
1932 0x33c3c, 0x33c50,
1933 0x33cf0, 0x33cfc,
1934 0x34000, 0x34030,
1935 0x34100, 0x34144,
1936 0x34190, 0x341d0,
1937 0x34200, 0x34318,
1938 0x34400, 0x3452c,
1939 0x34540, 0x3461c,
1940 0x34800, 0x34834,
1941 0x348c0, 0x34908,
1942 0x34910, 0x349ac,
1943 0x34a00, 0x34a04,
1944 0x34a0c, 0x34a2c,
1945 0x34a44, 0x34a50,
1946 0x34a74, 0x34c24,
1947 0x34d08, 0x34d14,
1948 0x34d1c, 0x34d20,
1949 0x34d3c, 0x34d50,
1950 0x35200, 0x3520c,
1951 0x35220, 0x35220,
1952 0x35240, 0x35240,
1953 0x35600, 0x35600,
1954 0x35608, 0x3560c,
1955 0x35a00, 0x35a1c,
1956 0x35e04, 0x35e20,
1957 0x35e38, 0x35e3c,
1958 0x35e80, 0x35e80,
1959 0x35e88, 0x35ea8,
1960 0x35eb0, 0x35eb4,
1961 0x35ec8, 0x35ed4,
1962 0x35fb8, 0x36004,
1963 0x36208, 0x3623c,
1964 0x36600, 0x36630,
1965 0x36a00, 0x36abc,
1966 0x36b00, 0x36b70,
1967 0x37000, 0x37048,
1968 0x37060, 0x3709c,
1969 0x370f0, 0x37148,
1970 0x37160, 0x3719c,
1971 0x371f0, 0x372e4,
1972 0x372f8, 0x373e4,
1973 0x373f8, 0x37448,
1974 0x37460, 0x3749c,
1975 0x374f0, 0x37548,
1976 0x37560, 0x3759c,
1977 0x375f0, 0x376e4,
1978 0x376f8, 0x377e4,
1979 0x377f8, 0x377fc,
1980 0x37814, 0x37814,
1981 0x3782c, 0x3782c,
1982 0x37880, 0x3788c,
1983 0x378e8, 0x378ec,
1984 0x37900, 0x37948,
1985 0x37960, 0x3799c,
1986 0x379f0, 0x37ae4,
1987 0x37af8, 0x37b10,
1988 0x37b28, 0x37b28,
1989 0x37b3c, 0x37b50,
1990 0x37bf0, 0x37c10,
1991 0x37c28, 0x37c28,
1992 0x37c3c, 0x37c50,
1993 0x37cf0, 0x37cfc,
1994 0x38000, 0x38030,
1995 0x38100, 0x38144,
1996 0x38190, 0x381d0,
1997 0x38200, 0x38318,
1998 0x38400, 0x3852c,
1999 0x38540, 0x3861c,
2000 0x38800, 0x38834,
2001 0x388c0, 0x38908,
2002 0x38910, 0x389ac,
2003 0x38a00, 0x38a04,
2004 0x38a0c, 0x38a2c,
2005 0x38a44, 0x38a50,
2006 0x38a74, 0x38c24,
2007 0x38d08, 0x38d14,
2008 0x38d1c, 0x38d20,
2009 0x38d3c, 0x38d50,
2010 0x39200, 0x3920c,
2011 0x39220, 0x39220,
2012 0x39240, 0x39240,
2013 0x39600, 0x39600,
2014 0x39608, 0x3960c,
2015 0x39a00, 0x39a1c,
2016 0x39e04, 0x39e20,
2017 0x39e38, 0x39e3c,
2018 0x39e80, 0x39e80,
2019 0x39e88, 0x39ea8,
2020 0x39eb0, 0x39eb4,
2021 0x39ec8, 0x39ed4,
2022 0x39fb8, 0x3a004,
2023 0x3a208, 0x3a23c,
2024 0x3a600, 0x3a630,
2025 0x3aa00, 0x3aabc,
2026 0x3ab00, 0x3ab70,
2027 0x3b000, 0x3b048,
2028 0x3b060, 0x3b09c,
2029 0x3b0f0, 0x3b148,
2030 0x3b160, 0x3b19c,
2031 0x3b1f0, 0x3b2e4,
2032 0x3b2f8, 0x3b3e4,
2033 0x3b3f8, 0x3b448,
2034 0x3b460, 0x3b49c,
2035 0x3b4f0, 0x3b548,
2036 0x3b560, 0x3b59c,
2037 0x3b5f0, 0x3b6e4,
2038 0x3b6f8, 0x3b7e4,
2039 0x3b7f8, 0x3b7fc,
2040 0x3b814, 0x3b814,
2041 0x3b82c, 0x3b82c,
2042 0x3b880, 0x3b88c,
2043 0x3b8e8, 0x3b8ec,
2044 0x3b900, 0x3b948,
2045 0x3b960, 0x3b99c,
2046 0x3b9f0, 0x3bae4,
2047 0x3baf8, 0x3bb10,
2048 0x3bb28, 0x3bb28,
2049 0x3bb3c, 0x3bb50,
2050 0x3bbf0, 0x3bc10,
2051 0x3bc28, 0x3bc28,
2052 0x3bc3c, 0x3bc50,
2053 0x3bcf0, 0x3bcfc,
2054 0x3c000, 0x3c030,
2055 0x3c100, 0x3c144,
2056 0x3c190, 0x3c1d0,
2057 0x3c200, 0x3c318,
2058 0x3c400, 0x3c52c,
2059 0x3c540, 0x3c61c,
2060 0x3c800, 0x3c834,
2061 0x3c8c0, 0x3c908,
2062 0x3c910, 0x3c9ac,
2063 0x3ca00, 0x3ca04,
2064 0x3ca0c, 0x3ca2c,
2065 0x3ca44, 0x3ca50,
2066 0x3ca74, 0x3cc24,
2067 0x3cd08, 0x3cd14,
2068 0x3cd1c, 0x3cd20,
2069 0x3cd3c, 0x3cd50,
2070 0x3d200, 0x3d20c,
2071 0x3d220, 0x3d220,
2072 0x3d240, 0x3d240,
2073 0x3d600, 0x3d600,
2074 0x3d608, 0x3d60c,
2075 0x3da00, 0x3da1c,
2076 0x3de04, 0x3de20,
2077 0x3de38, 0x3de3c,
2078 0x3de80, 0x3de80,
2079 0x3de88, 0x3dea8,
2080 0x3deb0, 0x3deb4,
2081 0x3dec8, 0x3ded4,
2082 0x3dfb8, 0x3e004,
2083 0x3e208, 0x3e23c,
2084 0x3e600, 0x3e630,
2085 0x3ea00, 0x3eabc,
2086 0x3eb00, 0x3eb70,
2087 0x3f000, 0x3f048,
2088 0x3f060, 0x3f09c,
2089 0x3f0f0, 0x3f148,
2090 0x3f160, 0x3f19c,
2091 0x3f1f0, 0x3f2e4,
2092 0x3f2f8, 0x3f3e4,
2093 0x3f3f8, 0x3f448,
2094 0x3f460, 0x3f49c,
2095 0x3f4f0, 0x3f548,
2096 0x3f560, 0x3f59c,
2097 0x3f5f0, 0x3f6e4,
2098 0x3f6f8, 0x3f7e4,
2099 0x3f7f8, 0x3f7fc,
2100 0x3f814, 0x3f814,
2101 0x3f82c, 0x3f82c,
2102 0x3f880, 0x3f88c,
2103 0x3f8e8, 0x3f8ec,
2104 0x3f900, 0x3f948,
2105 0x3f960, 0x3f99c,
2106 0x3f9f0, 0x3fae4,
2107 0x3faf8, 0x3fb10,
2108 0x3fb28, 0x3fb28,
2109 0x3fb3c, 0x3fb50,
2110 0x3fbf0, 0x3fc10,
2111 0x3fc28, 0x3fc28,
2112 0x3fc3c, 0x3fc50,
2113 0x3fcf0, 0x3fcfc,
2114 0x40000, 0x4000c,
2115 0x40040, 0x40068,
2116 0x40080, 0x40144,
2117 0x40180, 0x4018c,
2118 0x40200, 0x40298,
2119 0x402ac, 0x4033c,
2120 0x403f8, 0x403fc,
2121 0x41300, 0x413c4,
2122 0x41400, 0x4141c,
2123 0x41480, 0x414d0,
2124 0x44000, 0x44078,
2125 0x440c0, 0x44278,
2126 0x442c0, 0x44478,
2127 0x444c0, 0x44678,
2128 0x446c0, 0x44878,
2129 0x448c0, 0x449fc,
2130 0x45000, 0x45068,
2131 0x45080, 0x45084,
2132 0x450a0, 0x450b0,
2133 0x45200, 0x45268,
2134 0x45280, 0x45284,
2135 0x452a0, 0x452b0,
2136 0x460c0, 0x460e4,
2137 0x47000, 0x4708c,
2138 0x47200, 0x47250,
2139 0x47400, 0x47420,
2140 0x47600, 0x47618,
2141 0x47800, 0x47814,
2142 0x48000, 0x4800c,
2143 0x48040, 0x48068,
2144 0x48080, 0x48144,
2145 0x48180, 0x4818c,
2146 0x48200, 0x48298,
2147 0x482ac, 0x4833c,
2148 0x483f8, 0x483fc,
2149 0x49300, 0x493c4,
2150 0x49400, 0x4941c,
2151 0x49480, 0x494d0,
2152 0x4c000, 0x4c078,
2153 0x4c0c0, 0x4c278,
2154 0x4c2c0, 0x4c478,
2155 0x4c4c0, 0x4c678,
2156 0x4c6c0, 0x4c878,
2157 0x4c8c0, 0x4c9fc,
2158 0x4d000, 0x4d068,
2159 0x4d080, 0x4d084,
2160 0x4d0a0, 0x4d0b0,
2161 0x4d200, 0x4d268,
2162 0x4d280, 0x4d284,
2163 0x4d2a0, 0x4d2b0,
2164 0x4e0c0, 0x4e0e4,
2165 0x4f000, 0x4f08c,
2166 0x4f200, 0x4f250,
2167 0x4f400, 0x4f420,
2168 0x4f600, 0x4f618,
2169 0x4f800, 0x4f814,
2170 0x50000, 0x500cc,
2171 0x50400, 0x50400,
2172 0x50800, 0x508cc,
2173 0x50c00, 0x50c00,
2174 0x51000, 0x5101c,
2175 0x51300, 0x51308,
2176 };
2177
b8ff05a9
DM
2178 int i;
2179 struct adapter *ap = netdev2adap(dev);
251f9e88
SR
2180 static const unsigned int *reg_ranges;
2181 int arr_size = 0, buf_size = 0;
2182
2183 if (is_t4(ap->chip)) {
2184 reg_ranges = &t4_reg_ranges[0];
2185 arr_size = ARRAY_SIZE(t4_reg_ranges);
2186 buf_size = T4_REGMAP_SIZE;
2187 } else {
2188 reg_ranges = &t5_reg_ranges[0];
2189 arr_size = ARRAY_SIZE(t5_reg_ranges);
2190 buf_size = T5_REGMAP_SIZE;
2191 }
b8ff05a9
DM
2192
2193 regs->version = mk_adap_vers(ap);
2194
251f9e88
SR
2195 memset(buf, 0, buf_size);
2196 for (i = 0; i < arr_size; i += 2)
b8ff05a9
DM
2197 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2198}
2199
2200static int restart_autoneg(struct net_device *dev)
2201{
2202 struct port_info *p = netdev_priv(dev);
2203
2204 if (!netif_running(dev))
2205 return -EAGAIN;
2206 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2207 return -EINVAL;
060e0c75 2208 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
b8ff05a9
DM
2209 return 0;
2210}
2211
c5e06360
DM
2212static int identify_port(struct net_device *dev,
2213 enum ethtool_phys_id_state state)
b8ff05a9 2214{
c5e06360 2215 unsigned int val;
060e0c75
DM
2216 struct adapter *adap = netdev2adap(dev);
2217
c5e06360
DM
2218 if (state == ETHTOOL_ID_ACTIVE)
2219 val = 0xffff;
2220 else if (state == ETHTOOL_ID_INACTIVE)
2221 val = 0;
2222 else
2223 return -EINVAL;
b8ff05a9 2224
c5e06360 2225 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
b8ff05a9
DM
2226}
2227
2228static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2229{
2230 unsigned int v = 0;
2231
a0881cab
DM
2232 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2233 type == FW_PORT_TYPE_BT_XAUI) {
b8ff05a9
DM
2234 v |= SUPPORTED_TP;
2235 if (caps & FW_PORT_CAP_SPEED_100M)
2236 v |= SUPPORTED_100baseT_Full;
2237 if (caps & FW_PORT_CAP_SPEED_1G)
2238 v |= SUPPORTED_1000baseT_Full;
2239 if (caps & FW_PORT_CAP_SPEED_10G)
2240 v |= SUPPORTED_10000baseT_Full;
2241 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2242 v |= SUPPORTED_Backplane;
2243 if (caps & FW_PORT_CAP_SPEED_1G)
2244 v |= SUPPORTED_1000baseKX_Full;
2245 if (caps & FW_PORT_CAP_SPEED_10G)
2246 v |= SUPPORTED_10000baseKX4_Full;
2247 } else if (type == FW_PORT_TYPE_KR)
2248 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
a0881cab 2249 else if (type == FW_PORT_TYPE_BP_AP)
7d5e77aa
DM
2250 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2251 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2252 else if (type == FW_PORT_TYPE_BP4_AP)
2253 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2254 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2255 SUPPORTED_10000baseKX4_Full;
a0881cab
DM
2256 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2257 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
b8ff05a9
DM
2258 v |= SUPPORTED_FIBRE;
2259
2260 if (caps & FW_PORT_CAP_ANEG)
2261 v |= SUPPORTED_Autoneg;
2262 return v;
2263}
2264
2265static unsigned int to_fw_linkcaps(unsigned int caps)
2266{
2267 unsigned int v = 0;
2268
2269 if (caps & ADVERTISED_100baseT_Full)
2270 v |= FW_PORT_CAP_SPEED_100M;
2271 if (caps & ADVERTISED_1000baseT_Full)
2272 v |= FW_PORT_CAP_SPEED_1G;
2273 if (caps & ADVERTISED_10000baseT_Full)
2274 v |= FW_PORT_CAP_SPEED_10G;
2275 return v;
2276}
2277
2278static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2279{
2280 const struct port_info *p = netdev_priv(dev);
2281
2282 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
a0881cab 2283 p->port_type == FW_PORT_TYPE_BT_XFI ||
b8ff05a9
DM
2284 p->port_type == FW_PORT_TYPE_BT_XAUI)
2285 cmd->port = PORT_TP;
a0881cab
DM
2286 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2287 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
b8ff05a9 2288 cmd->port = PORT_FIBRE;
a0881cab
DM
2289 else if (p->port_type == FW_PORT_TYPE_SFP) {
2290 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2291 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2292 cmd->port = PORT_DA;
2293 else
2294 cmd->port = PORT_FIBRE;
2295 } else
b8ff05a9
DM
2296 cmd->port = PORT_OTHER;
2297
2298 if (p->mdio_addr >= 0) {
2299 cmd->phy_address = p->mdio_addr;
2300 cmd->transceiver = XCVR_EXTERNAL;
2301 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2302 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2303 } else {
2304 cmd->phy_address = 0; /* not really, but no better option */
2305 cmd->transceiver = XCVR_INTERNAL;
2306 cmd->mdio_support = 0;
2307 }
2308
2309 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2310 cmd->advertising = from_fw_linkcaps(p->port_type,
2311 p->link_cfg.advertising);
70739497
DD
2312 ethtool_cmd_speed_set(cmd,
2313 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
b8ff05a9
DM
2314 cmd->duplex = DUPLEX_FULL;
2315 cmd->autoneg = p->link_cfg.autoneg;
2316 cmd->maxtxpkt = 0;
2317 cmd->maxrxpkt = 0;
2318 return 0;
2319}
2320
2321static unsigned int speed_to_caps(int speed)
2322{
2323 if (speed == SPEED_100)
2324 return FW_PORT_CAP_SPEED_100M;
2325 if (speed == SPEED_1000)
2326 return FW_PORT_CAP_SPEED_1G;
2327 if (speed == SPEED_10000)
2328 return FW_PORT_CAP_SPEED_10G;
2329 return 0;
2330}
2331
2332static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2333{
2334 unsigned int cap;
2335 struct port_info *p = netdev_priv(dev);
2336 struct link_config *lc = &p->link_cfg;
25db0338 2337 u32 speed = ethtool_cmd_speed(cmd);
b8ff05a9
DM
2338
2339 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2340 return -EINVAL;
2341
2342 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2343 /*
2344 * PHY offers a single speed. See if that's what's
2345 * being requested.
2346 */
2347 if (cmd->autoneg == AUTONEG_DISABLE &&
25db0338
DD
2348 (lc->supported & speed_to_caps(speed)))
2349 return 0;
b8ff05a9
DM
2350 return -EINVAL;
2351 }
2352
2353 if (cmd->autoneg == AUTONEG_DISABLE) {
25db0338 2354 cap = speed_to_caps(speed);
b8ff05a9 2355
25db0338
DD
2356 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2357 (speed == SPEED_10000))
b8ff05a9
DM
2358 return -EINVAL;
2359 lc->requested_speed = cap;
2360 lc->advertising = 0;
2361 } else {
2362 cap = to_fw_linkcaps(cmd->advertising);
2363 if (!(lc->supported & cap))
2364 return -EINVAL;
2365 lc->requested_speed = 0;
2366 lc->advertising = cap | FW_PORT_CAP_ANEG;
2367 }
2368 lc->autoneg = cmd->autoneg;
2369
2370 if (netif_running(dev))
060e0c75
DM
2371 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2372 lc);
b8ff05a9
DM
2373 return 0;
2374}
2375
2376static void get_pauseparam(struct net_device *dev,
2377 struct ethtool_pauseparam *epause)
2378{
2379 struct port_info *p = netdev_priv(dev);
2380
2381 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2382 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2383 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2384}
2385
2386static int set_pauseparam(struct net_device *dev,
2387 struct ethtool_pauseparam *epause)
2388{
2389 struct port_info *p = netdev_priv(dev);
2390 struct link_config *lc = &p->link_cfg;
2391
2392 if (epause->autoneg == AUTONEG_DISABLE)
2393 lc->requested_fc = 0;
2394 else if (lc->supported & FW_PORT_CAP_ANEG)
2395 lc->requested_fc = PAUSE_AUTONEG;
2396 else
2397 return -EINVAL;
2398
2399 if (epause->rx_pause)
2400 lc->requested_fc |= PAUSE_RX;
2401 if (epause->tx_pause)
2402 lc->requested_fc |= PAUSE_TX;
2403 if (netif_running(dev))
060e0c75
DM
2404 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2405 lc);
b8ff05a9
DM
2406 return 0;
2407}
2408
b8ff05a9
DM
2409static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2410{
2411 const struct port_info *pi = netdev_priv(dev);
2412 const struct sge *s = &pi->adapter->sge;
2413
2414 e->rx_max_pending = MAX_RX_BUFFERS;
2415 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2416 e->rx_jumbo_max_pending = 0;
2417 e->tx_max_pending = MAX_TXQ_ENTRIES;
2418
2419 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2420 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2421 e->rx_jumbo_pending = 0;
2422 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2423}
2424
2425static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2426{
2427 int i;
2428 const struct port_info *pi = netdev_priv(dev);
2429 struct adapter *adapter = pi->adapter;
2430 struct sge *s = &adapter->sge;
2431
2432 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2433 e->tx_pending > MAX_TXQ_ENTRIES ||
2434 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2435 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2436 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2437 return -EINVAL;
2438
2439 if (adapter->flags & FULL_INIT_DONE)
2440 return -EBUSY;
2441
2442 for (i = 0; i < pi->nqsets; ++i) {
2443 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2444 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2445 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2446 }
2447 return 0;
2448}
2449
2450static int closest_timer(const struct sge *s, int time)
2451{
2452 int i, delta, match = 0, min_delta = INT_MAX;
2453
2454 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2455 delta = time - s->timer_val[i];
2456 if (delta < 0)
2457 delta = -delta;
2458 if (delta < min_delta) {
2459 min_delta = delta;
2460 match = i;
2461 }
2462 }
2463 return match;
2464}
2465
2466static int closest_thres(const struct sge *s, int thres)
2467{
2468 int i, delta, match = 0, min_delta = INT_MAX;
2469
2470 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2471 delta = thres - s->counter_val[i];
2472 if (delta < 0)
2473 delta = -delta;
2474 if (delta < min_delta) {
2475 min_delta = delta;
2476 match = i;
2477 }
2478 }
2479 return match;
2480}
2481
2482/*
2483 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2484 */
2485static unsigned int qtimer_val(const struct adapter *adap,
2486 const struct sge_rspq *q)
2487{
2488 unsigned int idx = q->intr_params >> 1;
2489
2490 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2491}
2492
2493/**
2494 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
2495 * @adap: the adapter
2496 * @q: the Rx queue
2497 * @us: the hold-off time in us, or 0 to disable timer
2498 * @cnt: the hold-off packet count, or 0 to disable counter
2499 *
2500 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2501 * one of the two needs to be enabled for the queue to generate interrupts.
2502 */
2503static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2504 unsigned int us, unsigned int cnt)
2505{
2506 if ((us | cnt) == 0)
2507 cnt = 1;
2508
2509 if (cnt) {
2510 int err;
2511 u32 v, new_idx;
2512
2513 new_idx = closest_thres(&adap->sge, cnt);
2514 if (q->desc && q->pktcnt_idx != new_idx) {
2515 /* the queue has already been created, update it */
2516 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2517 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2518 FW_PARAMS_PARAM_YZ(q->cntxt_id);
060e0c75
DM
2519 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2520 &new_idx);
b8ff05a9
DM
2521 if (err)
2522 return err;
2523 }
2524 q->pktcnt_idx = new_idx;
2525 }
2526
2527 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2528 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2529 return 0;
2530}
2531
2532static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2533{
2534 const struct port_info *pi = netdev_priv(dev);
2535 struct adapter *adap = pi->adapter;
d4fc9dc2
TLSC
2536 struct sge_rspq *q;
2537 int i;
2538 int r = 0;
2539
2540 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2541 q = &adap->sge.ethrxq[i].rspq;
2542 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2543 c->rx_max_coalesced_frames);
2544 if (r) {
2545 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2546 break;
2547 }
2548 }
2549 return r;
b8ff05a9
DM
2550}
2551
2552static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2553{
2554 const struct port_info *pi = netdev_priv(dev);
2555 const struct adapter *adap = pi->adapter;
2556 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2557
2558 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2559 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2560 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2561 return 0;
2562}
2563
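The hold-off settings above end up packed into q->intr_params: the timer index in the upper bits and the packet-counter enable in bit 0, which is exactly what qtimer_val() unpacks. A minimal sketch of that round trip, illustrative only and assuming an adapter "adap" and a software-owned response queue "q":

	unsigned int idx = closest_timer(&adap->sge, 100);	/* nearest supported hold-off to 100us */

	q->intr_params = QINTR_TIMER_IDX(idx) | QINTR_CNT_EN;	/* timer index + counter enable */
	WARN_ON(qtimer_val(adap, q) != adap->sge.timer_val[idx]);	/* decode gives back the table entry */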
1478b3ee
DM
2564/**
2565 * eeprom_ptov - translate a physical EEPROM address to virtual
2566 * @phys_addr: the physical EEPROM address
2567 * @fn: the PCI function number
2568 * @sz: size of function-specific area
2569 *
2570 * Translate a physical EEPROM address to virtual. The first 1K is
2571 * accessed through virtual addresses starting at 31K, and the rest is
2572 * accessed through virtual addresses starting at 0.
2573 *
2574 * The mapping is as follows:
2575 * [0..1K) -> [31K..32K)
2576 * [1K..1K+A) -> [31K-A..31K)
2577 * [1K+A..ES) -> [0..ES-A-1K)
2578 *
2579 * where A = @fn * @sz, and ES = EEPROM size.
b8ff05a9 2580 */
1478b3ee 2581static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
b8ff05a9 2582{
1478b3ee 2583 fn *= sz;
b8ff05a9
DM
2584 if (phys_addr < 1024)
2585 return phys_addr + (31 << 10);
1478b3ee
DM
2586 if (phys_addr < 1024 + fn)
2587 return 31744 - fn + phys_addr - 1024;
b8ff05a9 2588 if (phys_addr < EEPROMSIZE)
1478b3ee 2589 return phys_addr - 1024 - fn;
b8ff05a9
DM
2590 return -EINVAL;
2591}
2592
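The mapping above is easier to see with concrete numbers. The figures below are illustrative only and assume a 1K per-function area (sz == 1024) and PCI function 2, so A = 2K:

	/*
	 *	eeprom_ptov(0x000, 2, 1024) == 0x7c00	phys [0..1K)  -> virt [31K..32K)
	 *	eeprom_ptov(0x500, 2, 1024) == 0x7500	phys [1K..3K) -> virt [29K..31K)
	 *	eeprom_ptov(0xc00, 2, 1024) == 0x0000	phys [3K..ES) -> virt [0..ES-3K)
	 */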
2593/*
2594 * The next two routines implement eeprom read/write from physical addresses.
b8ff05a9
DM
2595 */
2596static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2597{
1478b3ee 2598 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2599
2600 if (vaddr >= 0)
2601 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2602 return vaddr < 0 ? vaddr : 0;
2603}
2604
2605static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2606{
1478b3ee 2607 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2608
2609 if (vaddr >= 0)
2610 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2611 return vaddr < 0 ? vaddr : 0;
2612}
2613
2614#define EEPROM_MAGIC 0x38E2F10C
2615
2616static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2617 u8 *data)
2618{
2619 int i, err = 0;
2620 struct adapter *adapter = netdev2adap(dev);
2621
2622 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2623 if (!buf)
2624 return -ENOMEM;
2625
2626 e->magic = EEPROM_MAGIC;
2627 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2628 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2629
2630 if (!err)
2631 memcpy(data, buf + e->offset, e->len);
2632 kfree(buf);
2633 return err;
2634}
2635
2636static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2637 u8 *data)
2638{
2639 u8 *buf;
2640 int err = 0;
2641 u32 aligned_offset, aligned_len, *p;
2642 struct adapter *adapter = netdev2adap(dev);
2643
2644 if (eeprom->magic != EEPROM_MAGIC)
2645 return -EINVAL;
2646
2647 aligned_offset = eeprom->offset & ~3;
2648 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2649
1478b3ee
DM
2650 if (adapter->fn > 0) {
2651 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2652
2653 if (aligned_offset < start ||
2654 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2655 return -EPERM;
2656 }
2657
b8ff05a9
DM
2658 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2659 /*
2660 * RMW possibly needed for first or last words.
2661 */
2662 buf = kmalloc(aligned_len, GFP_KERNEL);
2663 if (!buf)
2664 return -ENOMEM;
2665 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2666 if (!err && aligned_len > 4)
2667 err = eeprom_rd_phys(adapter,
2668 aligned_offset + aligned_len - 4,
2669 (u32 *)&buf[aligned_len - 4]);
2670 if (err)
2671 goto out;
2672 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2673 } else
2674 buf = data;
2675
2676 err = t4_seeprom_wp(adapter, false);
2677 if (err)
2678 goto out;
2679
2680 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2681 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2682 aligned_offset += 4;
2683 }
2684
2685 if (!err)
2686 err = t4_seeprom_wp(adapter, true);
2687out:
2688 if (buf != data)
2689 kfree(buf);
2690 return err;
2691}
2692
2693static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2694{
2695 int ret;
2696 const struct firmware *fw;
2697 struct adapter *adap = netdev2adap(netdev);
2698
2699 ef->data[sizeof(ef->data) - 1] = '\0';
2700 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2701 if (ret < 0)
2702 return ret;
2703
2704 ret = t4_load_fw(adap, fw->data, fw->size);
2705 release_firmware(fw);
2706 if (!ret)
2707 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2708 return ret;
2709}
2710
2711#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2712#define BCAST_CRC 0xa0ccc1a6
2713
2714static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2715{
2716 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2717 wol->wolopts = netdev2adap(dev)->wol;
2718 memset(&wol->sopass, 0, sizeof(wol->sopass));
2719}
2720
2721static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2722{
2723 int err = 0;
2724 struct port_info *pi = netdev_priv(dev);
2725
2726 if (wol->wolopts & ~WOL_SUPPORTED)
2727 return -EINVAL;
2728 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2729 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2730 if (wol->wolopts & WAKE_BCAST) {
2731 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2732 ~0ULL, 0, false);
2733 if (!err)
2734 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2735 ~6ULL, ~0ULL, BCAST_CRC, true);
2736 } else
2737 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2738 return err;
2739}
2740
c8f44aff 2741static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
87b6cf51 2742{
2ed28baa 2743 const struct port_info *pi = netdev_priv(dev);
c8f44aff 2744 netdev_features_t changed = dev->features ^ features;
19ecae2c 2745 int err;
19ecae2c 2746
f646968f 2747 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2ed28baa 2748 return 0;
19ecae2c 2749
2ed28baa
MM
2750 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2751 -1, -1, -1,
f646968f 2752 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2ed28baa 2753 if (unlikely(err))
f646968f 2754 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
19ecae2c 2755 return err;
87b6cf51
DM
2756}
2757
7850f63f 2758static u32 get_rss_table_size(struct net_device *dev)
671b0060
DM
2759{
2760 const struct port_info *pi = netdev_priv(dev);
671b0060 2761
7850f63f
BH
2762 return pi->rss_size;
2763}
2764
2765static int get_rss_table(struct net_device *dev, u32 *p)
2766{
2767 const struct port_info *pi = netdev_priv(dev);
2768 unsigned int n = pi->rss_size;
2769
671b0060 2770 while (n--)
7850f63f 2771 p[n] = pi->rss[n];
671b0060
DM
2772 return 0;
2773}
2774
7850f63f 2775static int set_rss_table(struct net_device *dev, const u32 *p)
671b0060
DM
2776{
2777 unsigned int i;
2778 struct port_info *pi = netdev_priv(dev);
2779
7850f63f
BH
2780 for (i = 0; i < pi->rss_size; i++)
2781 pi->rss[i] = p[i];
671b0060
DM
2782 if (pi->adapter->flags & FULL_INIT_DONE)
2783 return write_rss(pi, pi->rss);
2784 return 0;
2785}
2786
2787static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 2788 u32 *rules)
671b0060 2789{
f796564a
DM
2790 const struct port_info *pi = netdev_priv(dev);
2791
671b0060 2792 switch (info->cmd) {
f796564a
DM
2793 case ETHTOOL_GRXFH: {
2794 unsigned int v = pi->rss_mode;
2795
2796 info->data = 0;
2797 switch (info->flow_type) {
2798 case TCP_V4_FLOW:
2799 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2800 info->data = RXH_IP_SRC | RXH_IP_DST |
2801 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2802 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2803 info->data = RXH_IP_SRC | RXH_IP_DST;
2804 break;
2805 case UDP_V4_FLOW:
2806 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2807 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2808 info->data = RXH_IP_SRC | RXH_IP_DST |
2809 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2810 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2811 info->data = RXH_IP_SRC | RXH_IP_DST;
2812 break;
2813 case SCTP_V4_FLOW:
2814 case AH_ESP_V4_FLOW:
2815 case IPV4_FLOW:
2816 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2817 info->data = RXH_IP_SRC | RXH_IP_DST;
2818 break;
2819 case TCP_V6_FLOW:
2820 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2821 info->data = RXH_IP_SRC | RXH_IP_DST |
2822 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2823 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2824 info->data = RXH_IP_SRC | RXH_IP_DST;
2825 break;
2826 case UDP_V6_FLOW:
2827 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2828 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2829 info->data = RXH_IP_SRC | RXH_IP_DST |
2830 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2831 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2832 info->data = RXH_IP_SRC | RXH_IP_DST;
2833 break;
2834 case SCTP_V6_FLOW:
2835 case AH_ESP_V6_FLOW:
2836 case IPV6_FLOW:
2837 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2838 info->data = RXH_IP_SRC | RXH_IP_DST;
2839 break;
2840 }
2841 return 0;
2842 }
671b0060 2843 case ETHTOOL_GRXRINGS:
f796564a 2844 info->data = pi->nqsets;
671b0060
DM
2845 return 0;
2846 }
2847 return -EOPNOTSUPP;
2848}
2849
9b07be4b 2850static const struct ethtool_ops cxgb_ethtool_ops = {
b8ff05a9
DM
2851 .get_settings = get_settings,
2852 .set_settings = set_settings,
2853 .get_drvinfo = get_drvinfo,
2854 .get_msglevel = get_msglevel,
2855 .set_msglevel = set_msglevel,
2856 .get_ringparam = get_sge_param,
2857 .set_ringparam = set_sge_param,
2858 .get_coalesce = get_coalesce,
2859 .set_coalesce = set_coalesce,
2860 .get_eeprom_len = get_eeprom_len,
2861 .get_eeprom = get_eeprom,
2862 .set_eeprom = set_eeprom,
2863 .get_pauseparam = get_pauseparam,
2864 .set_pauseparam = set_pauseparam,
b8ff05a9
DM
2865 .get_link = ethtool_op_get_link,
2866 .get_strings = get_strings,
c5e06360 2867 .set_phys_id = identify_port,
b8ff05a9
DM
2868 .nway_reset = restart_autoneg,
2869 .get_sset_count = get_sset_count,
2870 .get_ethtool_stats = get_stats,
2871 .get_regs_len = get_regs_len,
2872 .get_regs = get_regs,
2873 .get_wol = get_wol,
2874 .set_wol = set_wol,
671b0060 2875 .get_rxnfc = get_rxnfc,
7850f63f 2876 .get_rxfh_indir_size = get_rss_table_size,
671b0060
DM
2877 .get_rxfh_indir = get_rss_table,
2878 .set_rxfh_indir = set_rss_table,
b8ff05a9
DM
2879 .flash_device = set_flash,
2880};
2881
2882/*
2883 * debugfs support
2884 */
b8ff05a9
DM
2885static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2886 loff_t *ppos)
2887{
2888 loff_t pos = *ppos;
496ad9aa 2889 loff_t avail = file_inode(file)->i_size;
b8ff05a9
DM
2890 unsigned int mem = (uintptr_t)file->private_data & 3;
2891 struct adapter *adap = file->private_data - mem;
2892
2893 if (pos < 0)
2894 return -EINVAL;
2895 if (pos >= avail)
2896 return 0;
2897 if (count > avail - pos)
2898 count = avail - pos;
2899
2900 while (count) {
2901 size_t len;
2902 int ret, ofst;
2903 __be32 data[16];
2904
19dd37ba
SR
2905 if ((mem == MEM_MC) || (mem == MEM_MC1))
2906 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
b8ff05a9
DM
2907 else
2908 ret = t4_edc_read(adap, mem, pos, data, NULL);
2909 if (ret)
2910 return ret;
2911
2912 ofst = pos % sizeof(data);
2913 len = min(count, sizeof(data) - ofst);
2914 if (copy_to_user(buf, (u8 *)data + ofst, len))
2915 return -EFAULT;
2916
2917 buf += len;
2918 pos += len;
2919 count -= len;
2920 }
2921 count = pos - *ppos;
2922 *ppos = pos;
2923 return count;
2924}
2925
2926static const struct file_operations mem_debugfs_fops = {
2927 .owner = THIS_MODULE,
234e3405 2928 .open = simple_open,
b8ff05a9 2929 .read = mem_read,
6038f373 2930 .llseek = default_llseek,
b8ff05a9
DM
2931};
2932
91744948 2933static void add_debugfs_mem(struct adapter *adap, const char *name,
1dd06ae8 2934 unsigned int idx, unsigned int size_mb)
b8ff05a9
DM
2935{
2936 struct dentry *de;
2937
2938 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2939 (void *)adap + idx, &mem_debugfs_fops);
2940 if (de && de->d_inode)
2941 de->d_inode->i_size = size_mb << 20;
2942}
2943
91744948 2944static int setup_debugfs(struct adapter *adap)
b8ff05a9
DM
2945{
2946 int i;
19dd37ba 2947 u32 size;
b8ff05a9
DM
2948
2949 if (IS_ERR_OR_NULL(adap->debugfs_root))
2950 return -1;
2951
2952 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
19dd37ba
SR
2953 if (i & EDRAM0_ENABLE) {
2954 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2955 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2956 }
2957 if (i & EDRAM1_ENABLE) {
2958 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2959 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2960 }
2961 if (is_t4(adap->chip)) {
2962 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2963 if (i & EXT_MEM_ENABLE)
2964 add_debugfs_mem(adap, "mc", MEM_MC,
2965 EXT_MEM_SIZE_GET(size));
2966 } else {
2967 if (i & EXT_MEM_ENABLE) {
2968 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2969 add_debugfs_mem(adap, "mc0", MEM_MC0,
2970 EXT_MEM_SIZE_GET(size));
2971 }
2972 if (i & EXT_MEM1_ENABLE) {
2973 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2974 add_debugfs_mem(adap, "mc1", MEM_MC1,
2975 EXT_MEM_SIZE_GET(size));
2976 }
2977 }
b8ff05a9
DM
2978 if (adap->l2t)
2979 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2980 &t4_l2t_fops);
2981 return 0;
2982}
2983
2984/*
2985 * upper-layer driver support
2986 */
2987
2988/*
2989 * Allocate an active-open TID and set it to the supplied value.
2990 */
2991int cxgb4_alloc_atid(struct tid_info *t, void *data)
2992{
2993 int atid = -1;
2994
2995 spin_lock_bh(&t->atid_lock);
2996 if (t->afree) {
2997 union aopen_entry *p = t->afree;
2998
f2b7e78d 2999 atid = (p - t->atid_tab) + t->atid_base;
b8ff05a9
DM
3000 t->afree = p->next;
3001 p->data = data;
3002 t->atids_in_use++;
3003 }
3004 spin_unlock_bh(&t->atid_lock);
3005 return atid;
3006}
3007EXPORT_SYMBOL(cxgb4_alloc_atid);
3008
3009/*
3010 * Release an active-open TID.
3011 */
3012void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3013{
f2b7e78d 3014 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
b8ff05a9
DM
3015
3016 spin_lock_bh(&t->atid_lock);
3017 p->next = t->afree;
3018 t->afree = p;
3019 t->atids_in_use--;
3020 spin_unlock_bh(&t->atid_lock);
3021}
3022EXPORT_SYMBOL(cxgb4_free_atid);
3023
3024/*
3025 * Allocate a server TID and set it to the supplied value.
3026 */
3027int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3028{
3029 int stid;
3030
3031 spin_lock_bh(&t->stid_lock);
3032 if (family == PF_INET) {
3033 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3034 if (stid < t->nstids)
3035 __set_bit(stid, t->stid_bmap);
3036 else
3037 stid = -1;
3038 } else {
3039 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3040 if (stid < 0)
3041 stid = -1;
3042 }
3043 if (stid >= 0) {
3044 t->stid_tab[stid].data = data;
3045 stid += t->stid_base;
3046 t->stids_in_use++;
3047 }
3048 spin_unlock_bh(&t->stid_lock);
3049 return stid;
3050}
3051EXPORT_SYMBOL(cxgb4_alloc_stid);
3052
dca4faeb
VP
3053/* Allocate a server filter TID and set it to the supplied value.
3054 */
3055int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3056{
3057 int stid;
3058
3059 spin_lock_bh(&t->stid_lock);
3060 if (family == PF_INET) {
3061 stid = find_next_zero_bit(t->stid_bmap,
3062 t->nstids + t->nsftids, t->nstids);
3063 if (stid < (t->nstids + t->nsftids))
3064 __set_bit(stid, t->stid_bmap);
3065 else
3066 stid = -1;
3067 } else {
3068 stid = -1;
3069 }
3070 if (stid >= 0) {
3071 t->stid_tab[stid].data = data;
3072 stid += t->stid_base;
3073 t->stids_in_use++;
3074 }
3075 spin_unlock_bh(&t->stid_lock);
3076 return stid;
3077}
3078EXPORT_SYMBOL(cxgb4_alloc_sftid);
3079
3080/* Release a server TID.
b8ff05a9
DM
3081 */
3082void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3083{
3084 stid -= t->stid_base;
3085 spin_lock_bh(&t->stid_lock);
3086 if (family == PF_INET)
3087 __clear_bit(stid, t->stid_bmap);
3088 else
3089 bitmap_release_region(t->stid_bmap, stid, 2);
3090 t->stid_tab[stid].data = NULL;
3091 t->stids_in_use--;
3092 spin_unlock_bh(&t->stid_lock);
3093}
3094EXPORT_SYMBOL(cxgb4_free_stid);
3095
3096/*
3097 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3098 */
3099static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3100 unsigned int tid)
3101{
3102 struct cpl_tid_release *req;
3103
3104 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3105 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3106 INIT_TP_WR(req, tid);
3107 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3108}
3109
3110/*
3111 * Queue a TID release request and if necessary schedule a work queue to
3112 * process it.
3113 */
31b9c19b 3114static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3115 unsigned int tid)
b8ff05a9
DM
3116{
3117 void **p = &t->tid_tab[tid];
3118 struct adapter *adap = container_of(t, struct adapter, tids);
3119
3120 spin_lock_bh(&adap->tid_release_lock);
3121 *p = adap->tid_release_head;
3122 /* Low 2 bits encode the Tx channel number */
3123 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3124 if (!adap->tid_release_task_busy) {
3125 adap->tid_release_task_busy = true;
3069ee9b 3126 queue_work(workq, &adap->tid_release_task);
b8ff05a9
DM
3127 }
3128 spin_unlock_bh(&adap->tid_release_lock);
3129}
b8ff05a9
DM
3130
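The deferred-release list above stores the Tx channel in the low two bits of the tid_tab slot address, which is safe because the slots hold pointers and are therefore at least 4-byte aligned. A small illustrative sketch of the tag/untag step that process_tid_release_list() below undoes (not part of the driver; t, tid and chan are assumed from the surrounding code):

	void **slot = &t->tid_tab[tid];				/* pointer-aligned, low 2 bits are 0 */
	void **head = (void **)((uintptr_t)slot | chan);	/* tag with channel 0..3 */
	unsigned int ch = (uintptr_t)head & 3;			/* recover the channel */
	void **p = (void *)head - ch;				/* recover the slot address */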
3131/*
3132 * Process the list of pending TID release requests.
3133 */
3134static void process_tid_release_list(struct work_struct *work)
3135{
3136 struct sk_buff *skb;
3137 struct adapter *adap;
3138
3139 adap = container_of(work, struct adapter, tid_release_task);
3140
3141 spin_lock_bh(&adap->tid_release_lock);
3142 while (adap->tid_release_head) {
3143 void **p = adap->tid_release_head;
3144 unsigned int chan = (uintptr_t)p & 3;
3145 p = (void *)p - chan;
3146
3147 adap->tid_release_head = *p;
3148 *p = NULL;
3149 spin_unlock_bh(&adap->tid_release_lock);
3150
3151 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3152 GFP_KERNEL)))
3153 schedule_timeout_uninterruptible(1);
3154
3155 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3156 t4_ofld_send(adap, skb);
3157 spin_lock_bh(&adap->tid_release_lock);
3158 }
3159 adap->tid_release_task_busy = false;
3160 spin_unlock_bh(&adap->tid_release_lock);
3161}
3162
3163/*
3164 * Release a TID and inform HW. If we are unable to allocate the release
3165 * message, we defer to a work queue.
3166 */
3167void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3168{
3169 void *old;
3170 struct sk_buff *skb;
3171 struct adapter *adap = container_of(t, struct adapter, tids);
3172
3173 old = t->tid_tab[tid];
3174 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3175 if (likely(skb)) {
3176 t->tid_tab[tid] = NULL;
3177 mk_tid_release(skb, chan, tid);
3178 t4_ofld_send(adap, skb);
3179 } else
3180 cxgb4_queue_tid_release(t, chan, tid);
3181 if (old)
3182 atomic_dec(&t->tids_in_use);
3183}
3184EXPORT_SYMBOL(cxgb4_remove_tid);
3185
3186/*
3187 * Allocate and initialize the TID tables. Returns 0 on success.
3188 */
3189static int tid_init(struct tid_info *t)
3190{
3191 size_t size;
f2b7e78d 3192 unsigned int stid_bmap_size;
b8ff05a9
DM
3193 unsigned int natids = t->natids;
3194
dca4faeb 3195 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
f2b7e78d
VP
3196 size = t->ntids * sizeof(*t->tid_tab) +
3197 natids * sizeof(*t->atid_tab) +
b8ff05a9 3198 t->nstids * sizeof(*t->stid_tab) +
dca4faeb 3199 t->nsftids * sizeof(*t->stid_tab) +
f2b7e78d 3200 stid_bmap_size * sizeof(long) +
dca4faeb
VP
3201 t->nftids * sizeof(*t->ftid_tab) +
3202 t->nsftids * sizeof(*t->ftid_tab);
f2b7e78d 3203
b8ff05a9
DM
3204 t->tid_tab = t4_alloc_mem(size);
3205 if (!t->tid_tab)
3206 return -ENOMEM;
3207
3208 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3209 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 3210 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
f2b7e78d 3211 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
b8ff05a9
DM
3212 spin_lock_init(&t->stid_lock);
3213 spin_lock_init(&t->atid_lock);
3214
3215 t->stids_in_use = 0;
3216 t->afree = NULL;
3217 t->atids_in_use = 0;
3218 atomic_set(&t->tids_in_use, 0);
3219
3220 /* Setup the free list for atid_tab and clear the stid bitmap. */
3221 if (natids) {
3222 while (--natids)
3223 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3224 t->afree = t->atid_tab;
3225 }
dca4faeb 3226 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
b8ff05a9
DM
3227 return 0;
3228}
3229
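For orientation, tid_init() above carves every table out of a single t4_alloc_mem() region; the layout, read directly from the pointer setup, is sketched here:

	/*
	 *	tid_tab[ntids]               void * per connection TID
	 *	atid_tab[natids]             union aopen_entry, linked into a free list
	 *	stid_tab[nstids + nsftids]   struct serv_entry per (filter) server TID
	 *	stid_bmap[]                  bitmap covering nstids + nsftids slots
	 *	ftid_tab[nftids + nsftids]   struct filter_entry
	 */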
3230/**
3231 * cxgb4_create_server - create an IP server
3232 * @dev: the device
3233 * @stid: the server TID
3234 * @sip: local IP address to bind server to
3235 * @sport: the server's TCP port
3236 * @queue: queue to direct messages from this server to
3237 *
3238 * Create an IP server for the given port and address.
3239 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3240 */
3241int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
793dad94
VP
3242 __be32 sip, __be16 sport, __be16 vlan,
3243 unsigned int queue)
b8ff05a9
DM
3244{
3245 unsigned int chan;
3246 struct sk_buff *skb;
3247 struct adapter *adap;
3248 struct cpl_pass_open_req *req;
80f40c1f 3249 int ret;
b8ff05a9
DM
3250
3251 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3252 if (!skb)
3253 return -ENOMEM;
3254
3255 adap = netdev2adap(dev);
3256 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3257 INIT_TP_WR(req, 0);
3258 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3259 req->local_port = sport;
3260 req->peer_port = htons(0);
3261 req->local_ip = sip;
3262 req->peer_ip = htonl(0);
e46dab4d 3263 chan = rxq_to_chan(&adap->sge, queue);
b8ff05a9
DM
3264 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3265 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3266 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
80f40c1f
VP
3267 ret = t4_mgmt_tx(adap, skb);
3268 return net_xmit_eval(ret);
b8ff05a9
DM
3269}
3270EXPORT_SYMBOL(cxgb4_create_server);
3271
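For context, an upper-layer driver normally pairs the call above with a server TID from cxgb4_alloc_stid(). A minimal illustrative sketch, with uld_ctx, rxq_id, ret and the port number as placeholders and error handling trimmed:

	int stid = cxgb4_alloc_stid(&adap->tids, PF_INET, uld_ctx);

	if (stid >= 0)
		ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
					  htons(80), 0, rxq_id);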
80f40c1f
VP
3272/* cxgb4_create_server6 - create an IPv6 server
3273 * @dev: the device
3274 * @stid: the server TID
3275 * @sip: local IPv6 address to bind server to
3276 * @sport: the server's TCP port
3277 * @queue: queue to direct messages from this server to
3278 *
3279 * Create an IPv6 server for the given port and address.
3280 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3281 */
3282int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3283 const struct in6_addr *sip, __be16 sport,
3284 unsigned int queue)
3285{
3286 unsigned int chan;
3287 struct sk_buff *skb;
3288 struct adapter *adap;
3289 struct cpl_pass_open_req6 *req;
3290 int ret;
3291
3292 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3293 if (!skb)
3294 return -ENOMEM;
3295
3296 adap = netdev2adap(dev);
3297 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3298 INIT_TP_WR(req, 0);
3299 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3300 req->local_port = sport;
3301 req->peer_port = htons(0);
3302 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3303 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3304 req->peer_ip_hi = cpu_to_be64(0);
3305 req->peer_ip_lo = cpu_to_be64(0);
3306 chan = rxq_to_chan(&adap->sge, queue);
3307 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3308 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3309 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3310 ret = t4_mgmt_tx(adap, skb);
3311 return net_xmit_eval(ret);
3312}
3313EXPORT_SYMBOL(cxgb4_create_server6);
3314
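The IPv6 path added here is symmetric, except that cxgb4_alloc_stid() hands a non-IPv4 family a naturally aligned block of four stid slots (the bitmap_find_free_region(..., 2) call further up) and the address arrives as a struct in6_addr. A sketch along the same lines, reusing the placeholder names from the IPv4 example and the kernel's in6addr_any wildcard:

	int stid = cxgb4_alloc_stid(&adap->tids, PF_INET6, uld_ctx);

	if (stid >= 0)
		ret = cxgb4_create_server6(dev, stid, &in6addr_any,
					   htons(80), rxq_id);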
3315int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3316 unsigned int queue, bool ipv6)
3317{
3318 struct sk_buff *skb;
3319 struct adapter *adap;
3320 struct cpl_close_listsvr_req *req;
3321 int ret;
3322
3323 adap = netdev2adap(dev);
3324
3325 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3326 if (!skb)
3327 return -ENOMEM;
3328
3329 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3330 INIT_TP_WR(req, 0);
3331 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3332 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3333 LISTSVR_IPV6(0)) | QUEUENO(queue));
3334 ret = t4_mgmt_tx(adap, skb);
3335 return net_xmit_eval(ret);
3336}
3337EXPORT_SYMBOL(cxgb4_remove_server);
3338
b8ff05a9
DM
3339/**
3340 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3341 * @mtus: the HW MTU table
3342 * @mtu: the target MTU
3343 * @idx: index of selected entry in the MTU table
3344 *
3345 * Returns the index and the value in the HW MTU table that is closest to
3346 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3347 * table, in which case that smallest available value is selected.
3348 */
3349unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3350 unsigned int *idx)
3351{
3352 unsigned int i = 0;
3353
3354 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3355 ++i;
3356 if (idx)
3357 *idx = i;
3358 return mtus[i];
3359}
3360EXPORT_SYMBOL(cxgb4_best_mtu);
3361
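A quick illustration of the selection rule above; the table values are invented for the example, not read from hardware:

	/*
	 *	with mtus[] = { 88, 256, 576, 1024, 1500, 9000, ... }:
	 *
	 *	cxgb4_best_mtu(mtus, 1400, &idx) == 1024, idx == 3
	 *	cxgb4_best_mtu(mtus, 1500, &idx) == 1500, idx == 4
	 *	cxgb4_best_mtu(mtus,   64, &idx) ==   88, idx == 0  (smallest entry wins)
	 */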
3362/**
3363 * cxgb4_port_chan - get the HW channel of a port
3364 * @dev: the net device for the port
3365 *
3366 * Return the HW Tx channel of the given port.
3367 */
3368unsigned int cxgb4_port_chan(const struct net_device *dev)
3369{
3370 return netdev2pinfo(dev)->tx_chan;
3371}
3372EXPORT_SYMBOL(cxgb4_port_chan);
3373
881806bc
VP
3374unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3375{
3376 struct adapter *adap = netdev2adap(dev);
2cc301d2 3377 u32 v1, v2, lp_count, hp_count;
881806bc 3378
2cc301d2
SR
3379 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3380 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3381 if (is_t4(adap->chip)) {
3382 lp_count = G_LP_COUNT(v1);
3383 hp_count = G_HP_COUNT(v1);
3384 } else {
3385 lp_count = G_LP_COUNT_T5(v1);
3386 hp_count = G_HP_COUNT_T5(v2);
3387 }
3388 return lpfifo ? lp_count : hp_count;
881806bc
VP
3389}
3390EXPORT_SYMBOL(cxgb4_dbfifo_count);
3391
b8ff05a9
DM
3392/**
3393 * cxgb4_port_viid - get the VI id of a port
3394 * @dev: the net device for the port
3395 *
3396 * Return the VI id of the given port.
3397 */
3398unsigned int cxgb4_port_viid(const struct net_device *dev)
3399{
3400 return netdev2pinfo(dev)->viid;
3401}
3402EXPORT_SYMBOL(cxgb4_port_viid);
3403
3404/**
3405 * cxgb4_port_idx - get the index of a port
3406 * @dev: the net device for the port
3407 *
3408 * Return the index of the given port.
3409 */
3410unsigned int cxgb4_port_idx(const struct net_device *dev)
3411{
3412 return netdev2pinfo(dev)->port_id;
3413}
3414EXPORT_SYMBOL(cxgb4_port_idx);
3415
b8ff05a9
DM
3416void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3417 struct tp_tcp_stats *v6)
3418{
3419 struct adapter *adap = pci_get_drvdata(pdev);
3420
3421 spin_lock(&adap->stats_lock);
3422 t4_tp_get_tcp_stats(adap, v4, v6);
3423 spin_unlock(&adap->stats_lock);
3424}
3425EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3426
3427void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3428 const unsigned int *pgsz_order)
3429{
3430 struct adapter *adap = netdev2adap(dev);
3431
3432 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3433 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3434 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3435 HPZ3(pgsz_order[3]));
3436}
3437EXPORT_SYMBOL(cxgb4_iscsi_init);
3438
3069ee9b
VP
3439int cxgb4_flush_eq_cache(struct net_device *dev)
3440{
3441 struct adapter *adap = netdev2adap(dev);
3442 int ret;
3443
3444 ret = t4_fwaddrspace_write(adap, adap->mbox,
3445 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3446 return ret;
3447}
3448EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3449
3450static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3451{
3452 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3453 __be64 indices;
3454 int ret;
3455
3456 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3457 if (!ret) {
404d9e3f
VP
3458 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3459 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
3460 }
3461 return ret;
3462}
3463
3464int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3465 u16 size)
3466{
3467 struct adapter *adap = netdev2adap(dev);
3468 u16 hw_pidx, hw_cidx;
3469 int ret;
3470
3471 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3472 if (ret)
3473 goto out;
3474
3475 if (pidx != hw_pidx) {
3476 u16 delta;
3477
3478 if (pidx >= hw_pidx)
3479 delta = pidx - hw_pidx;
3480 else
3481 delta = size - hw_pidx + pidx;
3482 wmb();
840f3000
VP
3483 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3484 QID(qid) | PIDX(delta));
3069ee9b
VP
3485 }
3486out:
3487 return ret;
3488}
3489EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3490
3cbdb928
VP
3491void cxgb4_disable_db_coalescing(struct net_device *dev)
3492{
3493 struct adapter *adap;
3494
3495 adap = netdev2adap(dev);
3496 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3497 F_NOCOALESCE);
3498}
3499EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3500
3501void cxgb4_enable_db_coalescing(struct net_device *dev)
3502{
3503 struct adapter *adap;
3504
3505 adap = netdev2adap(dev);
3506 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3507}
3508EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3509
b8ff05a9
DM
3510static struct pci_driver cxgb4_driver;
3511
3512static void check_neigh_update(struct neighbour *neigh)
3513{
3514 const struct device *parent;
3515 const struct net_device *netdev = neigh->dev;
3516
3517 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3518 netdev = vlan_dev_real_dev(netdev);
3519 parent = netdev->dev.parent;
3520 if (parent && parent->driver == &cxgb4_driver.driver)
3521 t4_l2t_update(dev_get_drvdata(parent), neigh);
3522}
3523
3524static int netevent_cb(struct notifier_block *nb, unsigned long event,
3525 void *data)
3526{
3527 switch (event) {
3528 case NETEVENT_NEIGH_UPDATE:
3529 check_neigh_update(data);
3530 break;
b8ff05a9
DM
3531 case NETEVENT_REDIRECT:
3532 default:
3533 break;
3534 }
3535 return 0;
3536}
3537
3538static bool netevent_registered;
3539static struct notifier_block cxgb4_netevent_nb = {
3540 .notifier_call = netevent_cb
3541};
3542
3069ee9b
VP
3543static void drain_db_fifo(struct adapter *adap, int usecs)
3544{
2cc301d2 3545 u32 v1, v2, lp_count, hp_count;
3069ee9b
VP
3546
3547 do {
2cc301d2
SR
3548 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3549 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3550 if (is_t4(adap->chip)) {
3551 lp_count = G_LP_COUNT(v1);
3552 hp_count = G_HP_COUNT(v1);
3553 } else {
3554 lp_count = G_LP_COUNT_T5(v1);
3555 hp_count = G_HP_COUNT_T5(v2);
3556 }
3557
3558 if (lp_count == 0 && hp_count == 0)
3559 break;
3069ee9b
VP
3560 set_current_state(TASK_UNINTERRUPTIBLE);
3561 schedule_timeout(usecs_to_jiffies(usecs));
3069ee9b
VP
3562 } while (1);
3563}
3564
3565static void disable_txq_db(struct sge_txq *q)
3566{
3567 spin_lock_irq(&q->db_lock);
3568 q->db_disabled = 1;
3569 spin_unlock_irq(&q->db_lock);
3570}
3571
3572static void enable_txq_db(struct sge_txq *q)
3573{
3574 spin_lock_irq(&q->db_lock);
3575 q->db_disabled = 0;
3576 spin_unlock_irq(&q->db_lock);
3577}
3578
3579static void disable_dbs(struct adapter *adap)
3580{
3581 int i;
3582
3583 for_each_ethrxq(&adap->sge, i)
3584 disable_txq_db(&adap->sge.ethtxq[i].q);
3585 for_each_ofldrxq(&adap->sge, i)
3586 disable_txq_db(&adap->sge.ofldtxq[i].q);
3587 for_each_port(adap, i)
3588 disable_txq_db(&adap->sge.ctrlq[i].q);
3589}
3590
3591static void enable_dbs(struct adapter *adap)
3592{
3593 int i;
3594
3595 for_each_ethrxq(&adap->sge, i)
3596 enable_txq_db(&adap->sge.ethtxq[i].q);
3597 for_each_ofldrxq(&adap->sge, i)
3598 enable_txq_db(&adap->sge.ofldtxq[i].q);
3599 for_each_port(adap, i)
3600 enable_txq_db(&adap->sge.ctrlq[i].q);
3601}
3602
3603static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3604{
3605 u16 hw_pidx, hw_cidx;
3606 int ret;
3607
3608 spin_lock_bh(&q->db_lock);
3609 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3610 if (ret)
3611 goto out;
3612 if (q->db_pidx != hw_pidx) {
3613 u16 delta;
3614
3615 if (q->db_pidx >= hw_pidx)
3616 delta = q->db_pidx - hw_pidx;
3617 else
3618 delta = q->size - hw_pidx + q->db_pidx;
3619 wmb();
840f3000
VP
3620 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3621 QID(q->cntxt_id) | PIDX(delta));
3069ee9b
VP
3622 }
3623out:
3624 q->db_disabled = 0;
3625 spin_unlock_bh(&q->db_lock);
3626 if (ret)
3627 CH_WARN(adap, "DB drop recovery failed.\n");
3628}
3629static void recover_all_queues(struct adapter *adap)
3630{
3631 int i;
3632
3633 for_each_ethrxq(&adap->sge, i)
3634 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3635 for_each_ofldrxq(&adap->sge, i)
3636 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3637 for_each_port(adap, i)
3638 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3639}
3640
881806bc
VP
3641static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3642{
3643 mutex_lock(&uld_mutex);
3644 if (adap->uld_handle[CXGB4_ULD_RDMA])
3645 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3646 cmd);
3647 mutex_unlock(&uld_mutex);
3648}
3649
3650static void process_db_full(struct work_struct *work)
3651{
3652 struct adapter *adap;
881806bc
VP
3653
3654 adap = container_of(work, struct adapter, db_full_task);
3655
881806bc 3656 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3069ee9b 3657 drain_db_fifo(adap, dbfifo_drain_delay);
840f3000
VP
3658 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3659 DBFIFO_HP_INT | DBFIFO_LP_INT,
3660 DBFIFO_HP_INT | DBFIFO_LP_INT);
881806bc 3661 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
881806bc
VP
3662}
3663
3664static void process_db_drop(struct work_struct *work)
3665{
3666 struct adapter *adap;
881806bc 3667
3069ee9b 3668 adap = container_of(work, struct adapter, db_drop_task);
881806bc 3669
2cc301d2
SR
3670 if (is_t4(adap->chip)) {
3671 disable_dbs(adap);
3672 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3673 drain_db_fifo(adap, 1);
3674 recover_all_queues(adap);
3675 enable_dbs(adap);
3676 } else {
3677 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3678 u16 qid = (dropped_db >> 15) & 0x1ffff;
3679 u16 pidx_inc = dropped_db & 0x1fff;
3680 unsigned int s_qpp;
3681 unsigned short udb_density;
3682 unsigned long qpshift;
3683 int page;
3684 u32 udb;
3685
3686 dev_warn(adap->pdev_dev,
3687 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3688 dropped_db, qid,
3689 (dropped_db >> 14) & 1,
3690 (dropped_db >> 13) & 1,
3691 pidx_inc);
3692
3693 drain_db_fifo(adap, 1);
3694
3695 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3696 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3697 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3698 qpshift = PAGE_SHIFT - ilog2(udb_density);
3699 udb = qid << qpshift;
3700 udb &= PAGE_MASK;
3701 page = udb / PAGE_SIZE;
3702 udb += (qid - (page * udb_density)) * 128;
3703
3704 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3705
3706 /* Re-enable BAR2 WC */
3707 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3708 }
3709
3069ee9b 3710 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
881806bc
VP
3711}
3712
3713void t4_db_full(struct adapter *adap)
3714{
2cc301d2
SR
3715 if (is_t4(adap->chip)) {
3716 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3717 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3718 queue_work(workq, &adap->db_full_task);
3719 }
881806bc
VP
3720}
3721
3722void t4_db_dropped(struct adapter *adap)
3723{
2cc301d2
SR
3724 if (is_t4(adap->chip))
3725 queue_work(workq, &adap->db_drop_task);
881806bc
VP
3726}
3727
b8ff05a9
DM
3728static void uld_attach(struct adapter *adap, unsigned int uld)
3729{
3730 void *handle;
3731 struct cxgb4_lld_info lli;
dca4faeb 3732 unsigned short i;
b8ff05a9
DM
3733
3734 lli.pdev = adap->pdev;
3735 lli.l2t = adap->l2t;
3736 lli.tids = &adap->tids;
3737 lli.ports = adap->port;
3738 lli.vr = &adap->vres;
3739 lli.mtus = adap->params.mtus;
3740 if (uld == CXGB4_ULD_RDMA) {
3741 lli.rxq_ids = adap->sge.rdma_rxq;
3742 lli.nrxq = adap->sge.rdmaqs;
3743 } else if (uld == CXGB4_ULD_ISCSI) {
3744 lli.rxq_ids = adap->sge.ofld_rxq;
3745 lli.nrxq = adap->sge.ofldqsets;
3746 }
3747 lli.ntxq = adap->sge.ofldqsets;
3748 lli.nchan = adap->params.nports;
3749 lli.nports = adap->params.nports;
3750 lli.wr_cred = adap->params.ofldq_wr_cred;
3751 lli.adapter_type = adap->params.rev;
3752 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3753 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
3754 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3755 (adap->fn * 4));
b8ff05a9 3756 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
3757 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3758 (adap->fn * 4));
793dad94 3759 lli.filt_mode = adap->filter_mode;
dca4faeb
VP
3760 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3761 for (i = 0; i < NCHAN; i++)
3762 lli.tx_modq[i] = i;
b8ff05a9
DM
3763 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3764 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3765 lli.fw_vers = adap->params.fw_vers;
3069ee9b 3766 lli.dbfifo_int_thresh = dbfifo_int_thresh;
dca4faeb
VP
3767 lli.sge_pktshift = adap->sge.pktshift;
3768 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
b8ff05a9
DM
3769
3770 handle = ulds[uld].add(&lli);
3771 if (IS_ERR(handle)) {
3772 dev_warn(adap->pdev_dev,
3773 "could not attach to the %s driver, error %ld\n",
3774 uld_str[uld], PTR_ERR(handle));
3775 return;
3776 }
3777
3778 adap->uld_handle[uld] = handle;
3779
3780 if (!netevent_registered) {
3781 register_netevent_notifier(&cxgb4_netevent_nb);
3782 netevent_registered = true;
3783 }
e29f5dbc
DM
3784
3785 if (adap->flags & FULL_INIT_DONE)
3786 ulds[uld].state_change(handle, CXGB4_STATE_UP);
b8ff05a9
DM
3787}
3788
3789static void attach_ulds(struct adapter *adap)
3790{
3791 unsigned int i;
3792
3793 mutex_lock(&uld_mutex);
3794 list_add_tail(&adap->list_node, &adapter_list);
3795 for (i = 0; i < CXGB4_ULD_MAX; i++)
3796 if (ulds[i].add)
3797 uld_attach(adap, i);
3798 mutex_unlock(&uld_mutex);
3799}
3800
3801static void detach_ulds(struct adapter *adap)
3802{
3803 unsigned int i;
3804
3805 mutex_lock(&uld_mutex);
3806 list_del(&adap->list_node);
3807 for (i = 0; i < CXGB4_ULD_MAX; i++)
3808 if (adap->uld_handle[i]) {
3809 ulds[i].state_change(adap->uld_handle[i],
3810 CXGB4_STATE_DETACH);
3811 adap->uld_handle[i] = NULL;
3812 }
3813 if (netevent_registered && list_empty(&adapter_list)) {
3814 unregister_netevent_notifier(&cxgb4_netevent_nb);
3815 netevent_registered = false;
3816 }
3817 mutex_unlock(&uld_mutex);
3818}
3819
3820static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3821{
3822 unsigned int i;
3823
3824 mutex_lock(&uld_mutex);
3825 for (i = 0; i < CXGB4_ULD_MAX; i++)
3826 if (adap->uld_handle[i])
3827 ulds[i].state_change(adap->uld_handle[i], new_state);
3828 mutex_unlock(&uld_mutex);
3829}
3830
3831/**
3832 * cxgb4_register_uld - register an upper-layer driver
3833 * @type: the ULD type
3834 * @p: the ULD methods
3835 *
3836 * Registers an upper-layer driver with this driver and notifies the ULD
3837 * about any presently available devices that support its type. Returns
3838 * %-EBUSY if a ULD of the same type is already registered.
3839 */
3840int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3841{
3842 int ret = 0;
3843 struct adapter *adap;
3844
3845 if (type >= CXGB4_ULD_MAX)
3846 return -EINVAL;
3847 mutex_lock(&uld_mutex);
3848 if (ulds[type].add) {
3849 ret = -EBUSY;
3850 goto out;
3851 }
3852 ulds[type] = *p;
3853 list_for_each_entry(adap, &adapter_list, list_node)
3854 uld_attach(adap, type);
3855out: mutex_unlock(&uld_mutex);
3856 return ret;
3857}
3858EXPORT_SYMBOL(cxgb4_register_uld);
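/* Illustrative only: an upper-layer driver (e.g. the RDMA ULD) registers
 * itself along these lines, supplying its own cxgb4_uld_info callbacks.
 * The callback names below are hypothetical placeholders.
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_uld_add,
 *		.state_change	= my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 */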
3859
3860/**
3861 * cxgb4_unregister_uld - unregister an upper-layer driver
3862 * @type: the ULD type
3863 *
3864 * Unregisters an existing upper-layer driver.
3865 */
3866int cxgb4_unregister_uld(enum cxgb4_uld type)
3867{
3868 struct adapter *adap;
3869
3870 if (type >= CXGB4_ULD_MAX)
3871 return -EINVAL;
3872 mutex_lock(&uld_mutex);
3873 list_for_each_entry(adap, &adapter_list, list_node)
3874 adap->uld_handle[type] = NULL;
3875 ulds[type].add = NULL;
3876 mutex_unlock(&uld_mutex);
3877 return 0;
3878}
3879EXPORT_SYMBOL(cxgb4_unregister_uld);
3880
3881/**
3882 * cxgb_up - enable the adapter
3883 * @adap: adapter being enabled
3884 *
3885 * Called when the first port is enabled, this function performs the
3886 * actions necessary to make an adapter operational, such as completing
3887 * the initialization of HW modules, and enabling interrupts.
3888 *
3889 * Must be called with the rtnl lock held.
3890 */
3891static int cxgb_up(struct adapter *adap)
3892{
aaefae9b 3893 int err;
b8ff05a9 3894
aaefae9b
DM
3895 err = setup_sge_queues(adap);
3896 if (err)
3897 goto out;
3898 err = setup_rss(adap);
3899 if (err)
3900 goto freeq;
b8ff05a9
DM
3901
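	/* With MSI-X, vector 0 is reserved for firmware events and other
	 * non-data interrupts; the per-queue vectors are requested next.
	 * Otherwise fall back to a single MSI or shared INTx interrupt.
	 */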
3902 if (adap->flags & USING_MSIX) {
aaefae9b 3903 name_msix_vecs(adap);
b8ff05a9
DM
3904 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3905 adap->msix_info[0].desc, adap);
3906 if (err)
3907 goto irq_err;
3908
3909 err = request_msix_queue_irqs(adap);
3910 if (err) {
3911 free_irq(adap->msix_info[0].vec, adap);
3912 goto irq_err;
3913 }
3914 } else {
3915 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3916 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
b1a3c2b6 3917 adap->port[0]->name, adap);
b8ff05a9
DM
3918 if (err)
3919 goto irq_err;
3920 }
3921 enable_rx(adap);
3922 t4_sge_start(adap);
3923 t4_intr_enable(adap);
aaefae9b 3924 adap->flags |= FULL_INIT_DONE;
b8ff05a9
DM
3925 notify_ulds(adap, CXGB4_STATE_UP);
3926 out:
3927 return err;
3928 irq_err:
3929 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
aaefae9b
DM
3930 freeq:
3931 t4_free_sge_resources(adap);
b8ff05a9
DM
3932 goto out;
3933}
3934
3935static void cxgb_down(struct adapter *adapter)
3936{
3937 t4_intr_disable(adapter);
3938 cancel_work_sync(&adapter->tid_release_task);
881806bc
VP
3939 cancel_work_sync(&adapter->db_full_task);
3940 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 3941 adapter->tid_release_task_busy = false;
204dc3c0 3942 adapter->tid_release_head = NULL;
b8ff05a9
DM
3943
3944 if (adapter->flags & USING_MSIX) {
3945 free_msix_queue_irqs(adapter);
3946 free_irq(adapter->msix_info[0].vec, adapter);
3947 } else
3948 free_irq(adapter->pdev->irq, adapter);
3949 quiesce_rx(adapter);
aaefae9b
DM
3950 t4_sge_stop(adapter);
3951 t4_free_sge_resources(adapter);
3952 adapter->flags &= ~FULL_INIT_DONE;
b8ff05a9
DM
3953}
3954
3955/*
3956 * net_device operations
3957 */
3958static int cxgb_open(struct net_device *dev)
3959{
3960 int err;
3961 struct port_info *pi = netdev_priv(dev);
3962 struct adapter *adapter = pi->adapter;
3963
6a3c869a
DM
3964 netif_carrier_off(dev);
3965
aaefae9b
DM
3966 if (!(adapter->flags & FULL_INIT_DONE)) {
3967 err = cxgb_up(adapter);
3968 if (err < 0)
3969 return err;
3970 }
b8ff05a9 3971
f68707b8
DM
3972 err = link_start(dev);
3973 if (!err)
3974 netif_tx_start_all_queues(dev);
3975 return err;
b8ff05a9
DM
3976}
3977
3978static int cxgb_close(struct net_device *dev)
3979{
b8ff05a9
DM
3980 struct port_info *pi = netdev_priv(dev);
3981 struct adapter *adapter = pi->adapter;
3982
3983 netif_tx_stop_all_queues(dev);
3984 netif_carrier_off(dev);
060e0c75 3985 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
b8ff05a9
DM
3986}
3987
f2b7e78d
VP
3988/* Return an error number if the indicated filter isn't writable ...
3989 */
3990static int writable_filter(struct filter_entry *f)
3991{
3992 if (f->locked)
3993 return -EPERM;
3994 if (f->pending)
3995 return -EBUSY;
3996
3997 return 0;
3998}
3999
4000/* Delete the filter at the specified index (if valid).  This checks for
4001 * all the common problems with doing this, such as the filter being locked
4002 * or currently pending in another operation.
4003 */
4004static int delete_filter(struct adapter *adapter, unsigned int fidx)
4005{
4006 struct filter_entry *f;
4007 int ret;
4008
dca4faeb 4009 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
f2b7e78d
VP
4010 return -EINVAL;
4011
4012 f = &adapter->tids.ftid_tab[fidx];
4013 ret = writable_filter(f);
4014 if (ret)
4015 return ret;
4016 if (f->valid)
4017 return del_filter_wr(adapter, fidx);
4018
4019 return 0;
4020}
4021
dca4faeb 4022int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
793dad94
VP
4023 __be32 sip, __be16 sport, __be16 vlan,
4024 unsigned int queue, unsigned char port, unsigned char mask)
dca4faeb
VP
4025{
4026 int ret;
4027 struct filter_entry *f;
4028 struct adapter *adap;
4029 int i;
4030 u8 *val;
4031
4032 adap = netdev2adap(dev);
4033
1cab775c
VP
4034 /* Adjust stid to correct filter index */
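	/* The stid for a server filter is offset by the number of regular
	 * stids, and its filter entry lives after the regular filters in
	 * ftid_tab, so rebase the stid into that region of the table.
	 */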
4035 stid -= adap->tids.nstids;
4036 stid += adap->tids.nftids;
4037
dca4faeb
VP
4038 /* Check to make sure the filter requested is writable ...
4039 */
4040 f = &adap->tids.ftid_tab[stid];
4041 ret = writable_filter(f);
4042 if (ret)
4043 return ret;
4044
4045 /* Clear out any old resources being used by the filter before
4046 * we start constructing the new filter.
4047 */
4048 if (f->valid)
4049 clear_filter(adap, f);
4050
4051 /* Clear out filter specifications */
4052 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4053 f->fs.val.lport = cpu_to_be16(sport);
4054 f->fs.mask.lport = ~0;
4055 val = (u8 *)&sip;
793dad94 4056 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
dca4faeb
VP
4057 for (i = 0; i < 4; i++) {
4058 f->fs.val.lip[i] = val[i];
4059 f->fs.mask.lip[i] = ~0;
4060 }
793dad94
VP
4061 if (adap->filter_mode & F_PORT) {
4062 f->fs.val.iport = port;
4063 f->fs.mask.iport = mask;
4064 }
4065 }
dca4faeb
VP
4066
4067 f->fs.dirsteer = 1;
4068 f->fs.iq = queue;
4069 /* Mark filter as locked */
4070 f->locked = 1;
4071 f->fs.rpttid = 1;
4072
4073 ret = set_filter_wr(adap, stid);
4074 if (ret) {
4075 clear_filter(adap, f);
4076 return ret;
4077 }
4078
4079 return 0;
4080}
4081EXPORT_SYMBOL(cxgb4_create_server_filter);
4082
4083int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4084 unsigned int queue, bool ipv6)
4085{
4086 int ret;
4087 struct filter_entry *f;
4088 struct adapter *adap;
4089
4090 adap = netdev2adap(dev);
1cab775c
VP
4091
4092 /* Adjust stid to correct filter index */
4093 stid -= adap->tids.nstids;
4094 stid += adap->tids.nftids;
4095
dca4faeb
VP
4096 f = &adap->tids.ftid_tab[stid];
4097 /* Unlock the filter */
4098 f->locked = 0;
4099
4100 ret = delete_filter(adap, stid);
4101 if (ret)
4102 return ret;
4103
4104 return 0;
4105}
4106EXPORT_SYMBOL(cxgb4_remove_server_filter);
4107
f5152c90
DM
4108static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4109 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
4110{
4111 struct port_stats stats;
4112 struct port_info *p = netdev_priv(dev);
4113 struct adapter *adapter = p->adapter;
b8ff05a9
DM
4114
4115 spin_lock(&adapter->stats_lock);
4116 t4_get_port_stats(adapter, p->tx_chan, &stats);
4117 spin_unlock(&adapter->stats_lock);
4118
4119 ns->tx_bytes = stats.tx_octets;
4120 ns->tx_packets = stats.tx_frames;
4121 ns->rx_bytes = stats.rx_octets;
4122 ns->rx_packets = stats.rx_frames;
4123 ns->multicast = stats.rx_mcast_frames;
4124
4125 /* detailed rx_errors */
4126 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4127 stats.rx_runt;
4128 ns->rx_over_errors = 0;
4129 ns->rx_crc_errors = stats.rx_fcs_err;
4130 ns->rx_frame_errors = stats.rx_symbol_err;
4131 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4132 stats.rx_ovflow2 + stats.rx_ovflow3 +
4133 stats.rx_trunc0 + stats.rx_trunc1 +
4134 stats.rx_trunc2 + stats.rx_trunc3;
4135 ns->rx_missed_errors = 0;
4136
4137 /* detailed tx_errors */
4138 ns->tx_aborted_errors = 0;
4139 ns->tx_carrier_errors = 0;
4140 ns->tx_fifo_errors = 0;
4141 ns->tx_heartbeat_errors = 0;
4142 ns->tx_window_errors = 0;
4143
4144 ns->tx_errors = stats.tx_error_frames;
4145 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4146 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4147 return ns;
4148}
4149
4150static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4151{
060e0c75 4152 unsigned int mbox;
b8ff05a9
DM
4153 int ret = 0, prtad, devad;
4154 struct port_info *pi = netdev_priv(dev);
4155 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4156
4157 switch (cmd) {
4158 case SIOCGMIIPHY:
4159 if (pi->mdio_addr < 0)
4160 return -EOPNOTSUPP;
4161 data->phy_id = pi->mdio_addr;
4162 break;
4163 case SIOCGMIIREG:
4164 case SIOCSMIIREG:
4165 if (mdio_phy_id_is_c45(data->phy_id)) {
4166 prtad = mdio_phy_id_prtad(data->phy_id);
4167 devad = mdio_phy_id_devad(data->phy_id);
4168 } else if (data->phy_id < 32) {
4169 prtad = data->phy_id;
4170 devad = 0;
4171 data->reg_num &= 0x1f;
4172 } else
4173 return -EINVAL;
4174
060e0c75 4175 mbox = pi->adapter->fn;
b8ff05a9 4176 if (cmd == SIOCGMIIREG)
060e0c75 4177 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
4178 data->reg_num, &data->val_out);
4179 else
060e0c75 4180 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
4181 data->reg_num, data->val_in);
4182 break;
4183 default:
4184 return -EOPNOTSUPP;
4185 }
4186 return ret;
4187}
4188
4189static void cxgb_set_rxmode(struct net_device *dev)
4190{
4191 /* unfortunately we can't return errors to the stack */
4192 set_rxmode(dev, -1, false);
4193}
4194
4195static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4196{
4197 int ret;
4198 struct port_info *pi = netdev_priv(dev);
4199
4200 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4201 return -EINVAL;
060e0c75
DM
4202 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4203 -1, -1, -1, true);
b8ff05a9
DM
4204 if (!ret)
4205 dev->mtu = new_mtu;
4206 return ret;
4207}
4208
4209static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4210{
4211 int ret;
4212 struct sockaddr *addr = p;
4213 struct port_info *pi = netdev_priv(dev);
4214
4215 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 4216 return -EADDRNOTAVAIL;
b8ff05a9 4217
060e0c75
DM
4218 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4219 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
4220 if (ret < 0)
4221 return ret;
4222
4223 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4224 pi->xact_addr_filt = ret;
4225 return 0;
4226}
4227
b8ff05a9
DM
4228#ifdef CONFIG_NET_POLL_CONTROLLER
4229static void cxgb_netpoll(struct net_device *dev)
4230{
4231 struct port_info *pi = netdev_priv(dev);
4232 struct adapter *adap = pi->adapter;
4233
4234 if (adap->flags & USING_MSIX) {
4235 int i;
4236 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4237
4238 for (i = pi->nqsets; i; i--, rx++)
4239 t4_sge_intr_msix(0, &rx->rspq);
4240 } else
4241 t4_intr_handler(adap)(0, adap);
4242}
4243#endif
4244
4245static const struct net_device_ops cxgb4_netdev_ops = {
4246 .ndo_open = cxgb_open,
4247 .ndo_stop = cxgb_close,
4248 .ndo_start_xmit = t4_eth_xmit,
9be793bf 4249 .ndo_get_stats64 = cxgb_get_stats,
b8ff05a9
DM
4250 .ndo_set_rx_mode = cxgb_set_rxmode,
4251 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 4252 .ndo_set_features = cxgb_set_features,
b8ff05a9
DM
4253 .ndo_validate_addr = eth_validate_addr,
4254 .ndo_do_ioctl = cxgb_ioctl,
4255 .ndo_change_mtu = cxgb_change_mtu,
b8ff05a9
DM
4256#ifdef CONFIG_NET_POLL_CONTROLLER
4257 .ndo_poll_controller = cxgb_netpoll,
4258#endif
4259};
4260
4261void t4_fatal_err(struct adapter *adap)
4262{
4263 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4264 t4_intr_disable(adap);
4265 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4266}
4267
4268static void setup_memwin(struct adapter *adap)
4269{
19dd37ba 4270 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
b8ff05a9
DM
4271
4272 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
19dd37ba
SR
4273 if (is_t4(adap->chip)) {
4274 mem_win0_base = bar0 + MEMWIN0_BASE;
4275 mem_win1_base = bar0 + MEMWIN1_BASE;
4276 mem_win2_base = bar0 + MEMWIN2_BASE;
4277 } else {
4278 /* For T5, only relative offset inside the PCIe BAR is passed */
4279 mem_win0_base = MEMWIN0_BASE;
4280 mem_win1_base = MEMWIN1_BASE_T5;
4281 mem_win2_base = MEMWIN2_BASE_T5;
4282 }
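	/* Each memory access window is programmed with its base address,
	 * the BAR it lives in, and its size encoded as log2(aperture) in
	 * units of 1KB (hence the "- 10" on the ilog2() values below).
	 */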
b8ff05a9 4283 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
19dd37ba 4284 mem_win0_base | BIR(0) |
b8ff05a9
DM
4285 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4286 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
19dd37ba 4287 mem_win1_base | BIR(0) |
b8ff05a9
DM
4288 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4289 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
19dd37ba 4290 mem_win2_base | BIR(0) |
b8ff05a9 4291 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
636f9d37
VP
4292}
4293
4294static void setup_memwin_rdma(struct adapter *adap)
4295{
1ae970e0
DM
4296 if (adap->vres.ocq.size) {
4297 unsigned int start, sz_kb;
4298
4299 start = pci_resource_start(adap->pdev, 2) +
4300 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4301 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4302 t4_write_reg(adap,
4303 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4304 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4305 t4_write_reg(adap,
4306 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4307 adap->vres.ocq.start);
4308 t4_read_reg(adap,
4309 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4310 }
b8ff05a9
DM
4311}
4312
02b5fb8e
DM
4313static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4314{
4315 u32 v;
4316 int ret;
4317
4318 /* get device capabilities */
4319 memset(c, 0, sizeof(*c));
4320 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4321 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 4322 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
060e0c75 4323 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
02b5fb8e
DM
4324 if (ret < 0)
4325 return ret;
4326
4327 /* select capabilities we'll be using */
4328 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4329 if (!vf_acls)
4330 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4331 else
4332 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4333 } else if (vf_acls) {
4334 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4335 return ret;
4336 }
4337 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4338 FW_CMD_REQUEST | FW_CMD_WRITE);
060e0c75 4339 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
02b5fb8e
DM
4340 if (ret < 0)
4341 return ret;
4342
060e0c75 4343 ret = t4_config_glbl_rss(adap, adap->fn,
02b5fb8e
DM
4344 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4345 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4346 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4347 if (ret < 0)
4348 return ret;
4349
060e0c75
DM
4350 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4351 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
02b5fb8e
DM
4352 if (ret < 0)
4353 return ret;
4354
4355 t4_sge_init(adap);
4356
02b5fb8e
DM
4357 /* tweak some settings */
4358 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4359 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4360 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4361 v = t4_read_reg(adap, TP_PIO_DATA);
4362 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
060e0c75 4363
dca4faeb
VP
4364 /* first 4 Tx modulation queues point to consecutive Tx channels */
4365 adap->params.tp.tx_modq_map = 0xE4;
4366 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4367 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4368
4369 /* associate each Tx modulation queue with consecutive Tx channels */
4370 v = 0x84218421;
4371 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4372 &v, 1, A_TP_TX_SCHED_HDR);
4373 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4374 &v, 1, A_TP_TX_SCHED_FIFO);
4375 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4376 &v, 1, A_TP_TX_SCHED_PCMD);
4377
4378#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4379 if (is_offload(adap)) {
4380 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4381 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4382 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4383 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4384 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4385 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4386 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4387 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4388 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4389 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4390 }
4391
060e0c75
DM
4392 /* get basic stuff going */
4393 return t4_early_init(adap, adap->fn);
02b5fb8e
DM
4394}
4395
b8ff05a9
DM
4396/*
4397 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4398 */
4399#define MAX_ATIDS 8192U
4400
636f9d37
VP
4401/*
4402 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4403 *
4404 * If the firmware we're dealing with has Configuration File support, then
4405 * we use that to perform all configuration
4406 */
4407
4408/*
4409 * Tweak configuration based on module parameters, etc. Most of these have
4410 * defaults assigned to them by Firmware Configuration Files (if we're using
4411 * them) but need to be explicitly set if we're using hard-coded
4412 * initialization. But even in the case of using Firmware Configuration
4413 * Files, we'd like to expose the ability to change these via module
4414 * parameters so these are essentially common tweaks/settings for
4415 * Configuration Files and hard-coded initialization ...
4416 */
4417static int adap_init0_tweaks(struct adapter *adapter)
4418{
4419 /*
4420 * Fix up various Host-Dependent Parameters like Page Size, Cache
4421 * Line Size, etc. The firmware default is for a 4KB Page Size and
4422 * 64B Cache Line Size ...
4423 */
4424 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4425
4426 /*
4427 * Process module parameters which affect early initialization.
4428 */
4429 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4430 dev_err(&adapter->pdev->dev,
4431 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4432 rx_dma_offset);
4433 rx_dma_offset = 2;
4434 }
4435 t4_set_reg_field(adapter, SGE_CONTROL,
4436 PKTSHIFT_MASK,
4437 PKTSHIFT(rx_dma_offset));
4438
4439 /*
4440 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4441 * adds the pseudo header itself.
4442 */
4443 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4444 CSUM_HAS_PSEUDO_HDR, 0);
4445
4446 return 0;
4447}
4448
4449/*
4450 * Attempt to initialize the adapter via a Firmware Configuration File.
4451 */
4452static int adap_init0_config(struct adapter *adapter, int reset)
4453{
4454 struct fw_caps_config_cmd caps_cmd;
4455 const struct firmware *cf;
4456 unsigned long mtype = 0, maddr = 0;
4457 u32 finiver, finicsum, cfcsum;
4458 int ret, using_flash;
0a57a536 4459 char *fw_config_file, fw_config_file_path[256];
636f9d37
VP
4460
4461 /*
4462 * Reset device if necessary.
4463 */
4464 if (reset) {
4465 ret = t4_fw_reset(adapter, adapter->mbox,
4466 PIORSTMODE | PIORST);
4467 if (ret < 0)
4468 goto bye;
4469 }
4470
4471 /*
4472 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4473 * then use that. Otherwise, use the configuration file stored
4474 * in the adapter flash ...
4475 */
0a57a536
SR
4476 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4477 case CHELSIO_T4:
4478 fw_config_file = FW_CFNAME;
4479 break;
4480 case CHELSIO_T5:
4481 fw_config_file = FW5_CFNAME;
4482 break;
4483 default:
4484 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4485 adapter->pdev->device);
4486 ret = -EINVAL;
4487 goto bye;
4488 }
4489
4490 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
636f9d37
VP
4491 if (ret < 0) {
4492 using_flash = 1;
4493 mtype = FW_MEMTYPE_CF_FLASH;
4494 maddr = t4_flash_cfg_addr(adapter);
4495 } else {
4496 u32 params[7], val[7];
4497
4498 using_flash = 0;
4499 if (cf->size >= FLASH_CFG_MAX_SIZE)
4500 ret = -ENOMEM;
4501 else {
4502 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4503 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4504 ret = t4_query_params(adapter, adapter->mbox,
4505 adapter->fn, 0, 1, params, val);
4506 if (ret == 0) {
4507 /*
4508 * For t4_memory_write() below addresses and
4509 * sizes have to be in terms of multiples of 4
4510 * bytes. So, if the Configuration File isn't
4511 * a multiple of 4 bytes in length we'll have
4512 * to write that out separately since we can't
4513 * guarantee that the bytes following the
4514 * residual byte in the buffer returned by
4515 * request_firmware() are zeroed out ...
4516 */
4517 size_t resid = cf->size & 0x3;
4518 size_t size = cf->size & ~0x3;
4519 __be32 *data = (__be32 *)cf->data;
4520
4521 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4522 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4523
4524 ret = t4_memory_write(adapter, mtype, maddr,
4525 size, data);
4526 if (ret == 0 && resid != 0) {
4527 union {
4528 __be32 word;
4529 char buf[4];
4530 } last;
4531 int i;
4532
4533 last.word = data[size >> 2];
4534 for (i = resid; i < 4; i++)
4535 last.buf[i] = 0;
4536 ret = t4_memory_write(adapter, mtype,
4537 maddr + size,
4538 4, &last.word);
4539 }
4540 }
4541 }
4542
4543 release_firmware(cf);
4544 if (ret)
4545 goto bye;
4546 }
4547
4548 /*
4549 * Issue a Capability Configuration command to the firmware to get it
4550 * to parse the Configuration File. We don't use t4_fw_config_file()
4551 * because we want the ability to modify various features after we've
4552 * processed the configuration file ...
4553 */
4554 memset(&caps_cmd, 0, sizeof(caps_cmd));
4555 caps_cmd.op_to_write =
4556 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4557 FW_CMD_REQUEST |
4558 FW_CMD_READ);
ce91a923 4559 caps_cmd.cfvalid_to_len16 =
636f9d37
VP
4560 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4561 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4562 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4563 FW_LEN16(caps_cmd));
4564 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4565 &caps_cmd);
4566 if (ret < 0)
4567 goto bye;
4568
4569 finiver = ntohl(caps_cmd.finiver);
4570 finicsum = ntohl(caps_cmd.finicsum);
4571 cfcsum = ntohl(caps_cmd.cfcsum);
4572 if (finicsum != cfcsum)
4573 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4574 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4575 finicsum, cfcsum);
4576
636f9d37
VP
4577 /*
4578 * And now tell the firmware to use the configuration we just loaded.
4579 */
4580 caps_cmd.op_to_write =
4581 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4582 FW_CMD_REQUEST |
4583 FW_CMD_WRITE);
ce91a923 4584 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
4585 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4586 NULL);
4587 if (ret < 0)
4588 goto bye;
4589
4590 /*
4591 * Tweak configuration based on system architecture, module
4592 * parameters, etc.
4593 */
4594 ret = adap_init0_tweaks(adapter);
4595 if (ret < 0)
4596 goto bye;
4597
4598 /*
4599 * And finally tell the firmware to initialize itself using the
4600 * parameters from the Configuration File.
4601 */
4602 ret = t4_fw_initialize(adapter, adapter->mbox);
4603 if (ret < 0)
4604 goto bye;
4605
0a57a536 4606 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
636f9d37
VP
4607 /*
4608 * Return successfully and note that we're operating with parameters
4609 * not supplied by the driver, rather than from hard-wired
4610 * initialization constants buried in the driver.
4611 */
4612 adapter->flags |= USING_SOFT_PARAMS;
4613 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4614 "Configuration File %s, version %#x, computed checksum %#x\n",
4615 (using_flash
4616 ? "in device FLASH"
0a57a536 4617 : fw_config_file_path),
636f9d37
VP
4618 finiver, cfcsum);
4619 return 0;
4620
4621 /*
4622 * Something bad happened. Return the error ... (If the "error"
4623 * is that there's no Configuration File on the adapter we don't
4624 * want to issue a warning since this is fairly common.)
4625 */
4626bye:
4627 if (ret != -ENOENT)
4628 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
4629 -ret);
4630 return ret;
4631}
4632
13ee15d3
VP
4633/*
4634 * Attempt to initialize the adapter via hard-coded, driver supplied
4635 * parameters ...
4636 */
4637static int adap_init0_no_config(struct adapter *adapter, int reset)
4638{
4639 struct sge *s = &adapter->sge;
4640 struct fw_caps_config_cmd caps_cmd;
4641 u32 v;
4642 int i, ret;
4643
4644 /*
4645 * Reset device if necessary
4646 */
4647 if (reset) {
4648 ret = t4_fw_reset(adapter, adapter->mbox,
4649 PIORSTMODE | PIORST);
4650 if (ret < 0)
4651 goto bye;
4652 }
4653
4654 /*
4655 * Get device capabilities and select which we'll be using.
4656 */
4657 memset(&caps_cmd, 0, sizeof(caps_cmd));
4658 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4659 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 4660 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
13ee15d3
VP
4661 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4662 &caps_cmd);
4663 if (ret < 0)
4664 goto bye;
4665
13ee15d3
VP
4666 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4667 if (!vf_acls)
4668 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4669 else
4670 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4671 } else if (vf_acls) {
4672 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4673 goto bye;
4674 }
4675 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4676 FW_CMD_REQUEST | FW_CMD_WRITE);
4677 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4678 NULL);
4679 if (ret < 0)
4680 goto bye;
4681
4682 /*
4683 * Tweak configuration based on system architecture, module
4684 * parameters, etc.
4685 */
4686 ret = adap_init0_tweaks(adapter);
4687 if (ret < 0)
4688 goto bye;
4689
4690 /*
4691 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4692 * mode which maps each Virtual Interface to its own section of
4693 * the RSS Table and we turn on all map and hash enables ...
4694 */
4695 adapter->flags |= RSS_TNLALLLOOKUP;
4696 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4697 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4698 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4699 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4700 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4701 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4702 if (ret < 0)
4703 goto bye;
4704
4705 /*
4706 * Set up our own fundamental resource provisioning ...
4707 */
4708 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4709 PFRES_NEQ, PFRES_NETHCTRL,
4710 PFRES_NIQFLINT, PFRES_NIQ,
4711 PFRES_TC, PFRES_NVI,
4712 FW_PFVF_CMD_CMASK_MASK,
4713 pfvfres_pmask(adapter, adapter->fn, 0),
4714 PFRES_NEXACTF,
4715 PFRES_R_CAPS, PFRES_WX_CAPS);
4716 if (ret < 0)
4717 goto bye;
4718
4719 /*
4720 * Perform low level SGE initialization. We need to do this before we
4721 * send the firmware the INITIALIZE command because that will cause
4722 * any other PF Drivers which are waiting for the Master
4723 * Initialization to proceed forward.
4724 */
4725 for (i = 0; i < SGE_NTIMERS - 1; i++)
4726 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4727 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4728 s->counter_val[0] = 1;
4729 for (i = 1; i < SGE_NCOUNTERS; i++)
4730 s->counter_val[i] = min(intr_cnt[i - 1],
4731 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4732 t4_sge_init(adapter);
4733
4734#ifdef CONFIG_PCI_IOV
4735 /*
4736 * Provision resource limits for Virtual Functions. We currently
4737 * grant them all the same static resource limits except for the Port
4738 * Access Rights Mask which we're assigning based on the PF. All of
4739 * the static provisioning stuff for both the PF and VF really needs
4740 * to be managed in a persistent manner for each device which the
4741 * firmware controls.
4742 */
4743 {
4744 int pf, vf;
4745
7d6727cf 4746 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
13ee15d3
VP
4747 if (num_vf[pf] <= 0)
4748 continue;
4749
4750 /* VF numbering starts at 1! */
4751 for (vf = 1; vf <= num_vf[pf]; vf++) {
4752 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4753 pf, vf,
4754 VFRES_NEQ, VFRES_NETHCTRL,
4755 VFRES_NIQFLINT, VFRES_NIQ,
4756 VFRES_TC, VFRES_NVI,
1f1e4958 4757 FW_PFVF_CMD_CMASK_MASK,
13ee15d3
VP
4758 pfvfres_pmask(
4759 adapter, pf, vf),
4760 VFRES_NEXACTF,
4761 VFRES_R_CAPS, VFRES_WX_CAPS);
4762 if (ret < 0)
4763 dev_warn(adapter->pdev_dev,
4764 "failed to "\
4765 "provision pf/vf=%d/%d; "
4766 "err=%d\n", pf, vf, ret);
4767 }
4768 }
4769 }
4770#endif
4771
4772 /*
4773 * Set up the default filter mode. Later we'll want to implement this
4774 * via a firmware command, etc. ... This needs to be done before the
4775 * firmware initialization command ... If the selected set of fields
4776 * isn't equal to the default value, we'll need to make sure that the
4777 * field selections will fit in the 36-bit budget.
4778 */
4779 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
404d9e3f 4780 int j, bits = 0;
13ee15d3 4781
404d9e3f
VP
4782 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4783 switch (tp_vlan_pri_map & (1 << j)) {
13ee15d3
VP
4784 case 0:
4785 /* compressed filter field not enabled */
4786 break;
4787 case FCOE_MASK:
4788 bits += 1;
4789 break;
4790 case PORT_MASK:
4791 bits += 3;
4792 break;
4793 case VNIC_ID_MASK:
4794 bits += 17;
4795 break;
4796 case VLAN_MASK:
4797 bits += 17;
4798 break;
4799 case TOS_MASK:
4800 bits += 8;
4801 break;
4802 case PROTOCOL_MASK:
4803 bits += 8;
4804 break;
4805 case ETHERTYPE_MASK:
4806 bits += 16;
4807 break;
4808 case MACMATCH_MASK:
4809 bits += 9;
4810 break;
4811 case MPSHITTYPE_MASK:
4812 bits += 3;
4813 break;
4814 case FRAGMENTATION_MASK:
4815 bits += 1;
4816 break;
4817 }
4818
4819 if (bits > 36) {
4820 dev_err(adapter->pdev_dev,
4821 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4822 " using %#x\n", tp_vlan_pri_map, bits,
4823 TP_VLAN_PRI_MAP_DEFAULT);
4824 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4825 }
4826 }
4827 v = tp_vlan_pri_map;
4828 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4829 &v, 1, TP_VLAN_PRI_MAP);
4830
4831 /*
4832 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4833 * to support any of the compressed filter fields above. Newer
4834 * versions of the firmware do this automatically but it doesn't hurt
4835 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
4836 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4837 * since the firmware automatically turns this on and off when we have
4838 * a non-zero number of filters active (since it does have a
4839 * performance impact).
4840 */
4841 if (tp_vlan_pri_map)
4842 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4843 FIVETUPLELOOKUP_MASK,
4844 FIVETUPLELOOKUP_MASK);
4845
4846 /*
4847 * Tweak some settings.
4848 */
4849 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4850 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4851 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4852 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4853
4854 /*
4855 * Get basic stuff going by issuing the Firmware Initialize command.
4856 * Note that this _must_ be after all PFVF commands ...
4857 */
4858 ret = t4_fw_initialize(adapter, adapter->mbox);
4859 if (ret < 0)
4860 goto bye;
4861
4862 /*
4863 * Return successfully!
4864 */
4865 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
4866 "driver parameters\n");
4867 return 0;
4868
4869 /*
4870 * Something bad happened. Return the error ...
4871 */
4872bye:
4873 return ret;
4874}
4875
b8ff05a9
DM
4876/*
4877 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4878 */
4879static int adap_init0(struct adapter *adap)
4880{
4881 int ret;
4882 u32 v, port_vec;
4883 enum dev_state state;
4884 u32 params[7], val[7];
9a4da2cd 4885 struct fw_caps_config_cmd caps_cmd;
636f9d37 4886 int reset = 1, j;
b8ff05a9 4887
636f9d37
VP
4888 /*
4889 * Contact FW, advertising Master capability (and potentially forcing
4890 * ourselves as the Master PF if our module parameter force_init is
4891 * set).
4892 */
4893 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4894 force_init ? MASTER_MUST : MASTER_MAY,
4895 &state);
b8ff05a9
DM
4896 if (ret < 0) {
4897 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4898 ret);
4899 return ret;
4900 }
636f9d37
VP
4901 if (ret == adap->mbox)
4902 adap->flags |= MASTER_PF;
4903 if (force_init && state == DEV_STATE_INIT)
4904 state = DEV_STATE_UNINIT;
b8ff05a9 4905
636f9d37
VP
4906 /*
4907 * If we're the Master PF Driver and the device is uninitialized,
4908 * then let's consider upgrading the firmware ... (We always want
4909 * to check the firmware version number in order to A. get it for
4910 * later reporting and B. to warn if the currently loaded firmware
4911 * is excessively mismatched relative to the driver.)
4912 */
4913 ret = t4_check_fw_version(adap);
e69972f5
JH
4914
4915 /* The error code -EFAULT is returned by t4_check_fw_version() if
4916 * firmware on adapter < supported firmware. If firmware on adapter
4917 * is too old (not supported by the driver) and we're the MASTER_PF, set
4918 * the adapter state to DEV_STATE_UNINIT to force a firmware upgrade
4919 * and reinitialization.
4920 */
4921 if ((adap->flags & MASTER_PF) && ret == -EFAULT)
4922 state = DEV_STATE_UNINIT;
636f9d37 4923 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
e69972f5 4924 if (ret == -EINVAL || ret == -EFAULT || ret > 0) {
636f9d37
VP
4925 if (upgrade_fw(adap) >= 0) {
4926 /*
4927 * Note that the chip was reset as part of the
4928 * firmware upgrade so we don't reset it again
4929 * below and grab the new firmware version.
4930 */
4931 reset = 0;
4932 ret = t4_check_fw_version(adap);
e69972f5
JH
4933 } else
4934 if (ret == -EFAULT) {
4935 /*
4936 * Firmware is old but still might
4937 * work if we force reinitialization
4938 * of the adapter. Ignoring FW upgrade
4939 * failure.
4940 */
4941 dev_warn(adap->pdev_dev,
4942 "Ignoring firmware upgrade "
4943 "failure, and forcing driver "
4944 "to reinitialize the "
4945 "adapter.\n");
4946 ret = 0;
4947 }
636f9d37
VP
4948 }
4949 if (ret < 0)
4950 return ret;
4951 }
b8ff05a9 4952
636f9d37
VP
4953 /*
4954 * Grab VPD parameters. This should be done after we establish a
4955 * connection to the firmware since some of the VPD parameters
4956 * (notably the Core Clock frequency) are retrieved via requests to
4957 * the firmware. On the other hand, we need these fairly early on
4958 * so we do this right after getting ahold of the firmware.
4959 */
4960 ret = get_vpd_params(adap, &adap->params.vpd);
a0881cab
DM
4961 if (ret < 0)
4962 goto bye;
a0881cab 4963
636f9d37 4964 /*
13ee15d3
VP
4965 * Find out what ports are available to us. Note that we need to do
4966 * this before calling adap_init0_no_config() since it needs nports
4967 * and portvec ...
636f9d37
VP
4968 */
4969 v =
4970 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4971 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4972 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
a0881cab
DM
4973 if (ret < 0)
4974 goto bye;
4975
636f9d37
VP
4976 adap->params.nports = hweight32(port_vec);
4977 adap->params.portvec = port_vec;
4978
4979 /*
4980 * If the firmware is initialized already (and we're not forcing a
4981 * master initialization), note that we're living with existing
4982 * adapter parameters. Otherwise, it's time to try initializing the
4983 * adapter ...
4984 */
4985 if (state == DEV_STATE_INIT) {
4986 dev_info(adap->pdev_dev, "Coming up as %s: "\
4987 "Adapter already initialized\n",
4988 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4989 adap->flags |= USING_SOFT_PARAMS;
4990 } else {
4991 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4992 "Initializing adapter\n");
636f9d37
VP
4993
4994 /*
4995 * If the firmware doesn't support Configuration
4996 * Files, warn the user.
4997 */
4998 if (ret < 0)
13ee15d3 4999 dev_warn(adap->pdev_dev, "Firmware doesn't support "
636f9d37 5000 "configuration file.\n");
13ee15d3
VP
5001 if (force_old_init)
5002 ret = adap_init0_no_config(adap, reset);
636f9d37
VP
5003 else {
5004 /*
13ee15d3
VP
5005 * Find out whether we're dealing with a version of
5006 * the firmware which has configuration file support.
636f9d37 5007 */
13ee15d3
VP
5008 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5009 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5010 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5011 params, val);
636f9d37 5012
13ee15d3
VP
5013 /*
5014 * If the firmware doesn't support Configuration
5015 * Files, use the old Driver-based, hard-wired
5016 * initialization. Otherwise, try using the
5017 * Configuration File support and fall back to the
5018 * Driver-based initialization if there's no
5019 * Configuration File found.
5020 */
5021 if (ret < 0)
5022 ret = adap_init0_no_config(adap, reset);
5023 else {
5024 /*
5025 * The firmware provides us with a memory
5026 * buffer where we can load a Configuration
5027 * File from the host if we want to override
5028 * the Configuration File in flash.
5029 */
5030
5031 ret = adap_init0_config(adap, reset);
5032 if (ret == -ENOENT) {
5033 dev_info(adap->pdev_dev,
5034 "No Configuration File present "
5035 "on adapter. Using hard-wired "
5036 "configuration parameters.\n");
5037 ret = adap_init0_no_config(adap, reset);
5038 }
636f9d37
VP
5039 }
5040 }
5041 if (ret < 0) {
5042 dev_err(adap->pdev_dev,
5043 "could not initialize adapter, error %d\n",
5044 -ret);
5045 goto bye;
5046 }
5047 }
5048
5049 /*
5050 * If we're living with non-hard-coded parameters (either from a
5051 * Firmware Configuration File or values programmed by a different PF
5052 * Driver), give the SGE code a chance to pull in anything that it
5053 * needs ... Note that this must be called after we retrieve our VPD
5054 * parameters in order to know how to convert core ticks to seconds.
5055 */
5056 if (adap->flags & USING_SOFT_PARAMS) {
5057 ret = t4_sge_init(adap);
5058 if (ret < 0)
5059 goto bye;
5060 }
5061
9a4da2cd
VP
5062 if (is_bypass_device(adap->pdev->device))
5063 adap->params.bypass = 1;
5064
636f9d37
VP
5065 /*
5066 * Grab some of our basic fundamental operating parameters.
5067 */
5068#define FW_PARAM_DEV(param) \
5069 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5070 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5071
b8ff05a9 5072#define FW_PARAM_PFVF(param) \
636f9d37
VP
5073 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5074 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5075 FW_PARAMS_PARAM_Y(0) | \
5076 FW_PARAMS_PARAM_Z(0)
b8ff05a9 5077
636f9d37 5078 params[0] = FW_PARAM_PFVF(EQ_START);
b8ff05a9
DM
5079 params[1] = FW_PARAM_PFVF(L2T_START);
5080 params[2] = FW_PARAM_PFVF(L2T_END);
5081 params[3] = FW_PARAM_PFVF(FILTER_START);
5082 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 5083 params[5] = FW_PARAM_PFVF(IQFLINT_START);
636f9d37 5084 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
b8ff05a9
DM
5085 if (ret < 0)
5086 goto bye;
636f9d37
VP
5087 adap->sge.egr_start = val[0];
5088 adap->l2t_start = val[1];
5089 adap->l2t_end = val[2];
b8ff05a9
DM
5090 adap->tids.ftid_base = val[3];
5091 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 5092 adap->sge.ingr_start = val[5];
b8ff05a9 5093
636f9d37
VP
5094 /* query params related to active filter region */
5095 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5096 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5097 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5098 /* If Active filter size is set we enable establishing
5099 * offload connection through firmware work request
5100 */
5101 if ((val[0] != val[1]) && (ret >= 0)) {
5102 adap->flags |= FW_OFLD_CONN;
5103 adap->tids.aftid_base = val[0];
5104 adap->tids.aftid_end = val[1];
5105 }
5106
b407a4a9
VP
5107 /* If we're running on newer firmware, let it know that we're
5108 * prepared to deal with encapsulated CPL messages. Older
5109 * firmware won't understand this and we'll just get
5110 * unencapsulated messages ...
5111 */
5112 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5113 val[0] = 1;
5114 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5115
636f9d37
VP
5116 /*
5117 * Get device capabilities so we can determine what resources we need
5118 * to manage.
5119 */
5120 memset(&caps_cmd, 0, sizeof(caps_cmd));
9a4da2cd 5121 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
13ee15d3 5122 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 5123 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
5124 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5125 &caps_cmd);
5126 if (ret < 0)
5127 goto bye;
5128
13ee15d3 5129 if (caps_cmd.ofldcaps) {
b8ff05a9
DM
5130 /* query offload-related parameters */
5131 params[0] = FW_PARAM_DEV(NTID);
5132 params[1] = FW_PARAM_PFVF(SERVER_START);
5133 params[2] = FW_PARAM_PFVF(SERVER_END);
5134 params[3] = FW_PARAM_PFVF(TDDP_START);
5135 params[4] = FW_PARAM_PFVF(TDDP_END);
5136 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
636f9d37
VP
5137 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5138 params, val);
b8ff05a9
DM
5139 if (ret < 0)
5140 goto bye;
5141 adap->tids.ntids = val[0];
5142 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5143 adap->tids.stid_base = val[1];
5144 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37
VP
5145 /*
5146 * Set up the server filter region. Divide the available filter
5147 * region into two parts. Regular filters get 1/3rd and server
5148 * filters get 2/3rd. This is only enabled if the workaround
5149 * path is enabled.
5150 * 1. Regular filters.
5151 * 2. Server filters: these are special filters which are used
5152 * to redirect SYN packets to the offload queue.
5153 */
5154 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5155 adap->tids.sftid_base = adap->tids.ftid_base +
5156 DIV_ROUND_UP(adap->tids.nftids, 3);
5157 adap->tids.nsftids = adap->tids.nftids -
5158 DIV_ROUND_UP(adap->tids.nftids, 3);
5159 adap->tids.nftids = adap->tids.sftid_base -
5160 adap->tids.ftid_base;
5161 }
b8ff05a9
DM
5162 adap->vres.ddp.start = val[3];
5163 adap->vres.ddp.size = val[4] - val[3] + 1;
5164 adap->params.ofldq_wr_cred = val[5];
636f9d37 5165
b8ff05a9
DM
5166 adap->params.offload = 1;
5167 }
636f9d37 5168 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
5169 params[0] = FW_PARAM_PFVF(STAG_START);
5170 params[1] = FW_PARAM_PFVF(STAG_END);
5171 params[2] = FW_PARAM_PFVF(RQ_START);
5172 params[3] = FW_PARAM_PFVF(RQ_END);
5173 params[4] = FW_PARAM_PFVF(PBL_START);
5174 params[5] = FW_PARAM_PFVF(PBL_END);
636f9d37
VP
5175 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5176 params, val);
b8ff05a9
DM
5177 if (ret < 0)
5178 goto bye;
5179 adap->vres.stag.start = val[0];
5180 adap->vres.stag.size = val[1] - val[0] + 1;
5181 adap->vres.rq.start = val[2];
5182 adap->vres.rq.size = val[3] - val[2] + 1;
5183 adap->vres.pbl.start = val[4];
5184 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
5185
5186 params[0] = FW_PARAM_PFVF(SQRQ_START);
5187 params[1] = FW_PARAM_PFVF(SQRQ_END);
5188 params[2] = FW_PARAM_PFVF(CQ_START);
5189 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
5190 params[4] = FW_PARAM_PFVF(OCQ_START);
5191 params[5] = FW_PARAM_PFVF(OCQ_END);
636f9d37 5192 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
a0881cab
DM
5193 if (ret < 0)
5194 goto bye;
5195 adap->vres.qp.start = val[0];
5196 adap->vres.qp.size = val[1] - val[0] + 1;
5197 adap->vres.cq.start = val[2];
5198 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
5199 adap->vres.ocq.start = val[4];
5200 adap->vres.ocq.size = val[5] - val[4] + 1;
b8ff05a9 5201 }
636f9d37 5202 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
5203 params[0] = FW_PARAM_PFVF(ISCSI_START);
5204 params[1] = FW_PARAM_PFVF(ISCSI_END);
636f9d37
VP
5205 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5206 params, val);
b8ff05a9
DM
5207 if (ret < 0)
5208 goto bye;
5209 adap->vres.iscsi.start = val[0];
5210 adap->vres.iscsi.size = val[1] - val[0] + 1;
5211 }
5212#undef FW_PARAM_PFVF
5213#undef FW_PARAM_DEV
5214
636f9d37
VP
5215 /*
5216 * These are finalized by FW initialization; load their values now.
5217 */
b8ff05a9
DM
5218 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5219 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
636f9d37 5220 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
b8ff05a9
DM
5221 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5222 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5223 adap->params.b_wnd);
7ee9ff94 5224
636f9d37
VP
5225 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5226 for (j = 0; j < NCHAN; j++)
5227 adap->params.tp.tx_modq[j] = j;
7ee9ff94 5228
793dad94
VP
5229 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5230 &adap->filter_mode, 1,
5231 TP_VLAN_PRI_MAP);
5232
636f9d37 5233 adap->flags |= FW_OK;
b8ff05a9
DM
5234 return 0;
5235
5236 /*
636f9d37
VP
5237 * Something bad happened. If a command timed out or failed with EIO,
5238 * the FW is not operating within its spec or something catastrophic
5239 * happened to the HW/FW; stop issuing commands.
b8ff05a9 5240 */
636f9d37
VP
5241bye:
5242 if (ret != -ETIMEDOUT && ret != -EIO)
5243 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
5244 return ret;
5245}
5246
204dc3c0
DM
5247/* EEH callbacks */
5248
5249static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5250 pci_channel_state_t state)
5251{
5252 int i;
5253 struct adapter *adap = pci_get_drvdata(pdev);
5254
5255 if (!adap)
5256 goto out;
5257
5258 rtnl_lock();
5259 adap->flags &= ~FW_OK;
5260 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5261 for_each_port(adap, i) {
5262 struct net_device *dev = adap->port[i];
5263
5264 netif_device_detach(dev);
5265 netif_carrier_off(dev);
5266 }
5267 if (adap->flags & FULL_INIT_DONE)
5268 cxgb_down(adap);
5269 rtnl_unlock();
5270 pci_disable_device(pdev);
5271out: return state == pci_channel_io_perm_failure ?
5272 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5273}
5274
5275static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5276{
5277 int i, ret;
5278 struct fw_caps_config_cmd c;
5279 struct adapter *adap = pci_get_drvdata(pdev);
5280
5281 if (!adap) {
5282 pci_restore_state(pdev);
5283 pci_save_state(pdev);
5284 return PCI_ERS_RESULT_RECOVERED;
5285 }
5286
5287 if (pci_enable_device(pdev)) {
5288 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
5289 return PCI_ERS_RESULT_DISCONNECT;
5290 }
5291
5292 pci_set_master(pdev);
5293 pci_restore_state(pdev);
5294 pci_save_state(pdev);
5295 pci_cleanup_aer_uncorrect_error_status(pdev);
5296
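	/* Re-establish contact with the firmware and redo the basic HW
	 * bring-up: wait for the device, say hello as the Master PF,
	 * reapply the capability configuration and allocate one virtual
	 * interface per port before bringing the queues back up.
	 */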
5297 if (t4_wait_dev_ready(adap) < 0)
5298 return PCI_ERS_RESULT_DISCONNECT;
777c2300 5299 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
204dc3c0
DM
5300 return PCI_ERS_RESULT_DISCONNECT;
5301 adap->flags |= FW_OK;
5302 if (adap_init1(adap, &c))
5303 return PCI_ERS_RESULT_DISCONNECT;
5304
5305 for_each_port(adap, i) {
5306 struct port_info *p = adap2pinfo(adap, i);
5307
060e0c75
DM
5308 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5309 NULL, NULL);
204dc3c0
DM
5310 if (ret < 0)
5311 return PCI_ERS_RESULT_DISCONNECT;
5312 p->viid = ret;
5313 p->xact_addr_filt = -1;
5314 }
5315
5316 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5317 adap->params.b_wnd);
1ae970e0 5318 setup_memwin(adap);
204dc3c0
DM
5319 if (cxgb_up(adap))
5320 return PCI_ERS_RESULT_DISCONNECT;
5321 return PCI_ERS_RESULT_RECOVERED;
5322}
5323
5324static void eeh_resume(struct pci_dev *pdev)
5325{
5326 int i;
5327 struct adapter *adap = pci_get_drvdata(pdev);
5328
5329 if (!adap)
5330 return;
5331
5332 rtnl_lock();
5333 for_each_port(adap, i) {
5334 struct net_device *dev = adap->port[i];
5335
5336 if (netif_running(dev)) {
5337 link_start(dev);
5338 cxgb_set_rxmode(dev);
5339 }
5340 netif_device_attach(dev);
5341 }
5342 rtnl_unlock();
5343}
5344
3646f0e5 5345static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
5346 .error_detected = eeh_err_detected,
5347 .slot_reset = eeh_slot_reset,
5348 .resume = eeh_resume,
5349};
5350
b8ff05a9
DM
5351static inline bool is_10g_port(const struct link_config *lc)
5352{
5353 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
5354}
5355
5356static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
5357 unsigned int size, unsigned int iqe_size)
5358{
5359 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
5360 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
5361 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5362 q->iqe_len = iqe_size;
5363 q->size = size;
5364}
5365
5366/*
5367 * Perform default configuration of DMA queues depending on the number and type
5368 * of ports we found and the number of available CPUs. Most settings can be
5369 * modified by the admin prior to actual use.
5370 */
91744948 5371static void cfg_queues(struct adapter *adap)
b8ff05a9
DM
5372{
5373 struct sge *s = &adap->sge;
5374 int i, q10g = 0, n10g = 0, qidx = 0;
5375
5376 for_each_port(adap, i)
5377 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
5378
5379 /*
5380 * We default to 1 queue per non-10G port and up to # of cores queues
5381 * per 10G port.
5382 */
5383 if (n10g)
5384 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5952dde7
YM
5385 if (q10g > netif_get_num_default_rss_queues())
5386 q10g = netif_get_num_default_rss_queues();
b8ff05a9
DM
5387
5388 for_each_port(adap, i) {
5389 struct port_info *pi = adap2pinfo(adap, i);
5390
5391 pi->first_qset = qidx;
5392 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
5393 qidx += pi->nqsets;
5394 }
5395
5396 s->ethqsets = qidx;
5397 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5398
5399 if (is_offload(adap)) {
5400 /*
5401 * For offload we use 1 queue/channel if all ports are up to 1G,
5402 * otherwise we divide all available queues amongst the channels
5403 * capped by the number of available cores.
5404 */
5405 if (n10g) {
5406 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5407 num_online_cpus());
5408 s->ofldqsets = roundup(i, adap->params.nports);
5409 } else
5410 s->ofldqsets = adap->params.nports;
5411 /* For RDMA one Rx queue per channel suffices */
5412 s->rdmaqs = adap->params.nports;
5413 }
5414
5415 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5416 struct sge_eth_rxq *r = &s->ethrxq[i];
5417
5418 init_rspq(&r->rspq, 0, 0, 1024, 64);
5419 r->fl.size = 72;
5420 }
5421
5422 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5423 s->ethtxq[i].q.size = 1024;
5424
5425 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5426 s->ctrlq[i].q.size = 512;
5427
5428 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5429 s->ofldtxq[i].q.size = 1024;
5430
5431 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5432 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5433
5434 init_rspq(&r->rspq, 0, 0, 1024, 64);
5435 r->rspq.uld = CXGB4_ULD_ISCSI;
5436 r->fl.size = 72;
5437 }
5438
5439 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5440 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5441
5442 init_rspq(&r->rspq, 0, 0, 511, 64);
5443 r->rspq.uld = CXGB4_ULD_RDMA;
5444 r->fl.size = 72;
5445 }
5446
5447 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
5448 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
5449}
5450
5451/*
5452 * Reduce the number of Ethernet queues across all ports to at most n.
5453 * n provides at least one queue per port.
5454 */
91744948 5455static void reduce_ethqs(struct adapter *adap, int n)
b8ff05a9
DM
5456{
5457 int i;
5458 struct port_info *pi;
5459
5460 while (n < adap->sge.ethqsets)
5461 for_each_port(adap, i) {
5462 pi = adap2pinfo(adap, i);
5463 if (pi->nqsets > 1) {
5464 pi->nqsets--;
5465 adap->sge.ethqsets--;
5466 if (adap->sge.ethqsets <= n)
5467 break;
5468 }
5469 }
5470
5471 n = 0;
5472 for_each_port(adap, i) {
5473 pi = adap2pinfo(adap, i);
5474 pi->first_qset = n;
5475 n += pi->nqsets;
5476 }
5477}
5478
5479/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5480#define EXTRA_VECS 2
5481
91744948 5482static int enable_msix(struct adapter *adap)
b8ff05a9
DM
5483{
5484 int ofld_need = 0;
5485 int i, err, want, need;
5486 struct sge *s = &adap->sge;
5487 unsigned int nchan = adap->params.nports;
5488 struct msix_entry entries[MAX_INGQ + 1];
5489
5490 for (i = 0; i < ARRAY_SIZE(entries); ++i)
5491 entries[i].entry = i;
5492
5493 want = s->max_ethqsets + EXTRA_VECS;
5494 if (is_offload(adap)) {
5495 want += s->rdmaqs + s->ofldqsets;
5496 /* need nchan for each possible ULD */
5497 ofld_need = 2 * nchan;
5498 }
5499 need = adap->params.nports + EXTRA_VECS + ofld_need;
5500
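	/* pci_enable_msix() returns 0 on success or, when it cannot allocate
	 * 'want' vectors, the number of vectors actually available; keep
	 * retrying with that smaller count until it drops below 'need'.
	 */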
5501 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
5502 want = err;
5503
5504 if (!err) {
5505 /*
5506 * Distribute available vectors to the various queue groups.
5507 * Every group gets its minimum requirement and NIC gets top
5508 * priority for leftovers.
5509 */
5510 i = want - EXTRA_VECS - ofld_need;
5511 if (i < s->max_ethqsets) {
5512 s->max_ethqsets = i;
5513 if (i < s->ethqsets)
5514 reduce_ethqs(adap, i);
5515 }
5516 if (is_offload(adap)) {
5517 i = want - EXTRA_VECS - s->max_ethqsets;
5518 i -= ofld_need - nchan;
5519 s->ofldqsets = (i / nchan) * nchan; /* round down */
5520 }
5521 for (i = 0; i < want; ++i)
5522 adap->msix_info[i].vec = entries[i].vector;
5523 } else if (err > 0)
5524 dev_info(adap->pdev_dev,
5525 "only %d MSI-X vectors left, not using MSI-X\n", err);
5526 return err;
5527}
5528
5529#undef EXTRA_VECS
5530
91744948 5531static int init_rss(struct adapter *adap)
671b0060
DM
5532{
5533 unsigned int i, j;
5534
5535 for_each_port(adap, i) {
5536 struct port_info *pi = adap2pinfo(adap, i);
5537
5538 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5539 if (!pi->rss)
5540 return -ENOMEM;
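		/* Populate the RSS indirection table round-robin across this
		 * port's Rx queue sets (ethtool_rxfh_indir_default() is just
		 * index modulo the queue count).
		 */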
5541 for (j = 0; j < pi->rss_size; j++)
278bc429 5542 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
671b0060
DM
5543 }
5544 return 0;
5545}
5546
91744948 5547static void print_port_info(const struct net_device *dev)
b8ff05a9
DM
5548{
5549 static const char *base[] = {
a0881cab 5550 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
7d5e77aa 5551 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
b8ff05a9
DM
5552 };
5553
b8ff05a9 5554 char buf[80];
118969ed 5555 char *bufp = buf;
f1a051b9 5556 const char *spd = "";
118969ed
DM
5557 const struct port_info *pi = netdev_priv(dev);
5558 const struct adapter *adap = pi->adapter;
f1a051b9
DM
5559
5560 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5561 spd = " 2.5 GT/s";
5562 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5563 spd = " 5 GT/s";
b8ff05a9 5564
118969ed
DM
5565 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5566 bufp += sprintf(bufp, "100/");
5567 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5568 bufp += sprintf(bufp, "1000/");
5569 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5570 bufp += sprintf(bufp, "10G/");
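	/* Back up over the trailing '/' so the last speed runs into "BASE-". */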
5571 if (bufp != buf)
5572 --bufp;
5573 sprintf(bufp, "BASE-%s", base[pi->port_type]);
5574
5575 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
0a57a536
SR
5576 adap->params.vpd.id,
5577 CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
118969ed
DM
5578 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5579 (adap->flags & USING_MSIX) ? " MSI-X" :
5580 (adap->flags & USING_MSI) ? " MSI" : "");
5581 netdev_info(dev, "S/N: %s, E/C: %s\n",
5582 adap->params.vpd.sn, adap->params.vpd.ec);
b8ff05a9
DM
5583}
5584
91744948 5585static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
ef306b50 5586{
e5c8ae5f 5587 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
ef306b50
DM
5588}
5589
06546391
DM
5590/*
5591 * Free the following resources:
5592 * - memory used for tables
5593 * - MSI/MSI-X
5594 * - net devices
5595 * - resources FW is holding for us
5596 */
5597static void free_some_resources(struct adapter *adapter)
5598{
5599 unsigned int i;
5600
5601 t4_free_mem(adapter->l2t);
5602 t4_free_mem(adapter->tids.tid_tab);
5603 disable_msi(adapter);
5604
5605 for_each_port(adapter, i)
671b0060
DM
5606 if (adapter->port[i]) {
5607 kfree(adap2pinfo(adapter, i)->rss);
06546391 5608 free_netdev(adapter->port[i]);
671b0060 5609 }
06546391 5610 if (adapter->flags & FW_OK)
060e0c75 5611 t4_fw_bye(adapter, adapter->fn);
06546391
DM
5612}
5613
2ed28baa 5614#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 5615#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9 5616 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
22adfe0a 5617#define SEGMENT_SIZE 128
b8ff05a9 5618
1dd06ae8 5619static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
b8ff05a9 5620{
22adfe0a 5621 int func, i, err, s_qpp, qpp, num_seg;
b8ff05a9 5622 struct port_info *pi;
c8f44aff 5623 bool highdma = false;
b8ff05a9
DM
5624 struct adapter *adapter = NULL;
5625
5626 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5627
5628 err = pci_request_regions(pdev, KBUILD_MODNAME);
5629 if (err) {
5630 /* Just info, some other driver may have claimed the device. */
5631 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5632 return err;
5633 }
5634
060e0c75 5635 /* We control everything through one PF */
b8ff05a9 5636 func = PCI_FUNC(pdev->devfn);
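	/*
	 * Other functions only save PCI state (so SR-IOV can be restored
	 * later) and jump to the SR-IOV setup at the end of probe.
	 */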
060e0c75 5637 if (func != ent->driver_data) {
204dc3c0 5638 pci_save_state(pdev); /* to restore SR-IOV later */
b8ff05a9 5639 goto sriov;
204dc3c0 5640 }
b8ff05a9
DM
5641
5642 err = pci_enable_device(pdev);
5643 if (err) {
5644 dev_err(&pdev->dev, "cannot enable PCI device\n");
5645 goto out_release_regions;
5646 }
5647
5648 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 5649 highdma = true;
b8ff05a9
DM
5650 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5651 if (err) {
5652 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5653 "coherent allocations\n");
5654 goto out_disable_device;
5655 }
5656 } else {
5657 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5658 if (err) {
5659 dev_err(&pdev->dev, "no usable DMA configuration\n");
5660 goto out_disable_device;
5661 }
5662 }
5663
5664 pci_enable_pcie_error_reporting(pdev);
ef306b50 5665 enable_pcie_relaxed_ordering(pdev);
b8ff05a9
DM
5666 pci_set_master(pdev);
5667 pci_save_state(pdev);
5668
5669 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5670 if (!adapter) {
5671 err = -ENOMEM;
5672 goto out_disable_device;
5673 }
5674
5675 adapter->regs = pci_ioremap_bar(pdev, 0);
5676 if (!adapter->regs) {
5677 dev_err(&pdev->dev, "cannot map device registers\n");
5678 err = -ENOMEM;
5679 goto out_free_adapter;
5680 }
5681
5682 adapter->pdev = pdev;
5683 adapter->pdev_dev = &pdev->dev;
3069ee9b 5684 adapter->mbox = func;
060e0c75 5685 adapter->fn = func;
b8ff05a9
DM
5686 adapter->msg_enable = dflt_msg_enable;
5687 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5688
5689 spin_lock_init(&adapter->stats_lock);
5690 spin_lock_init(&adapter->tid_release_lock);
5691
5692 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
5693 INIT_WORK(&adapter->db_full_task, process_db_full);
5694 INIT_WORK(&adapter->db_drop_task, process_db_drop);
b8ff05a9
DM
5695
5696 err = t4_prep_adapter(adapter);
5697 if (err)
22adfe0a
SR
5698 goto out_unmap_bar0;
5699
5700 if (!is_t4(adapter->chip)) {
5701 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5702 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5703 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
5704 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5705
 5706 /* Each segment is 128B. Write coalescing is enabled only
 5707 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF value for this PF
 5708 * is no larger than the number of segments that fit in a
 5709 * page.
 5710 */
5711 if (qpp > num_seg) {
5712 dev_err(&pdev->dev,
5713 "Incorrect number of egress queues per page\n");
5714 err = -EINVAL;
5715 goto out_unmap_bar0;
5716 }
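		/*
		 * BAR2 carries the doorbell region on T5; map it
		 * write-combined so doorbell writes can be coalesced.
		 */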
5717 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5718 pci_resource_len(pdev, 2));
5719 if (!adapter->bar2) {
5720 dev_err(&pdev->dev, "cannot map device bar2 region\n");
5721 err = -ENOMEM;
5722 goto out_unmap_bar0;
5723 }
5724 }
5725
636f9d37 5726 setup_memwin(adapter);
b8ff05a9 5727 err = adap_init0(adapter);
636f9d37 5728 setup_memwin_rdma(adapter);
b8ff05a9
DM
5729 if (err)
5730 goto out_unmap_bar;
5731
5732 for_each_port(adapter, i) {
5733 struct net_device *netdev;
5734
5735 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5736 MAX_ETH_QSETS);
5737 if (!netdev) {
5738 err = -ENOMEM;
5739 goto out_free_dev;
5740 }
5741
5742 SET_NETDEV_DEV(netdev, &pdev->dev);
5743
5744 adapter->port[i] = netdev;
5745 pi = netdev_priv(netdev);
5746 pi->adapter = adapter;
5747 pi->xact_addr_filt = -1;
b8ff05a9 5748 pi->port_id = i;
b8ff05a9
DM
5749 netdev->irq = pdev->irq;
5750
2ed28baa
MM
5751 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5752 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5753 NETIF_F_RXCSUM | NETIF_F_RXHASH |
f646968f 5754 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
c8f44aff
MM
5755 if (highdma)
5756 netdev->hw_features |= NETIF_F_HIGHDMA;
5757 netdev->features |= netdev->hw_features;
b8ff05a9
DM
5758 netdev->vlan_features = netdev->features & VLAN_FEAT;
5759
01789349
JP
5760 netdev->priv_flags |= IFF_UNICAST_FLT;
5761
b8ff05a9
DM
5762 netdev->netdev_ops = &cxgb4_netdev_ops;
5763 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
5764 }
5765
5766 pci_set_drvdata(pdev, adapter);
5767
5768 if (adapter->flags & FW_OK) {
060e0c75 5769 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
5770 if (err)
5771 goto out_free_dev;
5772 }
5773
5774 /*
 5775 * Configure queues and allocate tables now; they can be needed as
5776 * soon as the first register_netdev completes.
5777 */
5778 cfg_queues(adapter);
5779
5780 adapter->l2t = t4_init_l2t();
5781 if (!adapter->l2t) {
5782 /* We tolerate a lack of L2T, giving up some functionality */
5783 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
5784 adapter->params.offload = 0;
5785 }
5786
5787 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
5788 dev_warn(&pdev->dev, "could not allocate TID table, "
5789 "continuing\n");
5790 adapter->params.offload = 0;
5791 }
5792
f7cabcdd
DM
5793 /* See what interrupts we'll be using */
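	/* msi setting: >1 tries MSI-X first, 1 allows only MSI, 0 forces legacy INTx. */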
5794 if (msi > 1 && enable_msix(adapter) == 0)
5795 adapter->flags |= USING_MSIX;
5796 else if (msi > 0 && pci_enable_msi(pdev) == 0)
5797 adapter->flags |= USING_MSI;
5798
671b0060
DM
5799 err = init_rss(adapter);
5800 if (err)
5801 goto out_free_dev;
5802
b8ff05a9
DM
5803 /*
5804 * The card is now ready to go. If any errors occur during device
5805 * registration we do not fail the whole card but rather proceed only
5806 * with the ports we manage to register successfully. However we must
5807 * register at least one net device.
5808 */
5809 for_each_port(adapter, i) {
a57cabe0
DM
5810 pi = adap2pinfo(adapter, i);
5811 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5812 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5813
b8ff05a9
DM
5814 err = register_netdev(adapter->port[i]);
5815 if (err)
b1a3c2b6 5816 break;
b1a3c2b6
DM
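		/*
		 * Record which port index owns this Tx channel so later
		 * channel-based lookups can find the owning net device.
		 */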
5817 adapter->chan_map[pi->tx_chan] = i;
5818 print_port_info(adapter->port[i]);
b8ff05a9 5819 }
b1a3c2b6 5820 if (i == 0) {
b8ff05a9
DM
5821 dev_err(&pdev->dev, "could not register any net devices\n");
5822 goto out_free_dev;
5823 }
b1a3c2b6
DM
5824 if (err) {
5825 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5826 err = 0;
6403eab1 5827 }
b8ff05a9
DM
5828
5829 if (cxgb4_debugfs_root) {
5830 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5831 cxgb4_debugfs_root);
5832 setup_debugfs(adapter);
5833 }
5834
6482aa7c
DLR
5835 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5836 pdev->needs_freset = 1;
5837
b8ff05a9
DM
5838 if (is_offload(adapter))
5839 attach_ulds(adapter);
5840
b8ff05a9
DM
5841sriov:
5842#ifdef CONFIG_PCI_IOV
7d6727cf 5843 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
b8ff05a9
DM
5844 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
5845 dev_info(&pdev->dev,
5846 "instantiated %u virtual functions\n",
5847 num_vf[func]);
5848#endif
5849 return 0;
5850
5851 out_free_dev:
06546391 5852 free_some_resources(adapter);
b8ff05a9 5853 out_unmap_bar:
22adfe0a
SR
5854 if (!is_t4(adapter->chip))
5855 iounmap(adapter->bar2);
5856 out_unmap_bar0:
b8ff05a9
DM
5857 iounmap(adapter->regs);
5858 out_free_adapter:
5859 kfree(adapter);
5860 out_disable_device:
5861 pci_disable_pcie_error_reporting(pdev);
5862 pci_disable_device(pdev);
5863 out_release_regions:
5864 pci_release_regions(pdev);
5865 pci_set_drvdata(pdev, NULL);
5866 return err;
5867}
5868
91744948 5869static void remove_one(struct pci_dev *pdev)
b8ff05a9
DM
5870{
5871 struct adapter *adapter = pci_get_drvdata(pdev);
5872
636f9d37 5873#ifdef CONFIG_PCI_IOV
b8ff05a9
DM
5874 pci_disable_sriov(pdev);
5875
636f9d37
VP
5876#endif
5877
b8ff05a9
DM
5878 if (adapter) {
5879 int i;
5880
5881 if (is_offload(adapter))
5882 detach_ulds(adapter);
5883
5884 for_each_port(adapter, i)
8f3a7676 5885 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
b8ff05a9
DM
5886 unregister_netdev(adapter->port[i]);
5887
5888 if (adapter->debugfs_root)
5889 debugfs_remove_recursive(adapter->debugfs_root);
5890
f2b7e78d
VP
5891 /* If we allocated filters, free up state associated with any
5892 * valid filters ...
5893 */
5894 if (adapter->tids.ftid_tab) {
5895 struct filter_entry *f = &adapter->tids.ftid_tab[0];
dca4faeb
VP
5896 for (i = 0; i < (adapter->tids.nftids +
5897 adapter->tids.nsftids); i++, f++)
f2b7e78d
VP
5898 if (f->valid)
5899 clear_filter(adapter, f);
5900 }
5901
aaefae9b
DM
5902 if (adapter->flags & FULL_INIT_DONE)
5903 cxgb_down(adapter);
b8ff05a9 5904
06546391 5905 free_some_resources(adapter);
b8ff05a9 5906 iounmap(adapter->regs);
22adfe0a
SR
5907 if (!is_t4(adapter->chip))
5908 iounmap(adapter->bar2);
b8ff05a9
DM
5909 kfree(adapter);
5910 pci_disable_pcie_error_reporting(pdev);
5911 pci_disable_device(pdev);
5912 pci_release_regions(pdev);
5913 pci_set_drvdata(pdev, NULL);
a069ec91 5914 } else
b8ff05a9
DM
5915 pci_release_regions(pdev);
5916}
5917
5918static struct pci_driver cxgb4_driver = {
5919 .name = KBUILD_MODNAME,
5920 .id_table = cxgb4_pci_tbl,
5921 .probe = init_one,
91744948 5922 .remove = remove_one,
204dc3c0 5923 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
5924};
5925
5926static int __init cxgb4_init_module(void)
5927{
5928 int ret;
5929
3069ee9b
VP
5930 workq = create_singlethread_workqueue("cxgb4");
5931 if (!workq)
5932 return -ENOMEM;
5933
b8ff05a9
DM
 5934 /* Debugfs support is optional; just warn if this fails */
5935 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
5936 if (!cxgb4_debugfs_root)
428ac43f 5937 pr_warn("could not create debugfs entry, continuing\n");
b8ff05a9
DM
5938
5939 ret = pci_register_driver(&cxgb4_driver);
5940 if (ret < 0)
5941 debugfs_remove(cxgb4_debugfs_root);
5942 return ret;
5943}
5944
5945static void __exit cxgb4_cleanup_module(void)
5946{
5947 pci_unregister_driver(&cxgb4_driver);
5948 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3069ee9b
VP
5949 flush_workqueue(workq);
5950 destroy_workqueue(workq);
b8ff05a9
DM
5951}
5952
5953module_init(cxgb4_init_module);
5954module_exit(cxgb4_cleanup_module);