/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress Queues
 * with Interrupt capability to serve as the VF's Firmware Event Queue and
 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 * Lists associated with them.  For each Ethernet/Control Egress Queue and
 * for each Free List, we need an Egress Context.
 */
enum {
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PF's access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	for (;;) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}
#endif
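
/*
 * A worked example of the bit isolation in pfvfres_pmask() above: for
 * portvec = 0b0110, portvec & (portvec - 1) = 0b0100, so
 * portvec ^ (portvec & (portvec - 1)) = 0b0010, the lowest set bit.
 * This gives the same result as the more common portvec & -portvec idiom.
 */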

enum {
	MEMWIN0_APERTURE = 65536,
	MEMWIN0_BASE     = 0x30000,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN2_APERTURE = 2048,
	MEMWIN2_BASE     = 0x1b800,
};

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),	/* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	{ 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");
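
/*
 * Note: only SGE_NTIMERS - 1 hold-off timers are exposed as module
 * parameters here; the remaining timer slot is presumably reserved for the
 * MAX_SGE_TIMERVAL fallback described at the top of this file (an inference
 * from this file, not a documented guarantee).
 */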

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */
	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	return 0;
}
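
/*
 * Note: the pointer comparison in fwevtq_handler() distinguishes Ethernet
 * TX queues from offload TX queues purely by their position in struct sge --
 * it assumes the ethtxq[] array is laid out before ofldtxq[] in that
 * structure.
 */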

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
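
/*
 * The unwind path above releases IRQs in the reverse order of allocation,
 * reusing the same msi counter so that each queue is freed against the
 * vector it was registered with.
 */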

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
}

/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}
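
/*
 * Note on msi_idx in setup_sge_queues(): a positive value names the next
 * MSI-X vector to bind to a queue, while the negative -(abs_id + 1) encoding
 * appears to tell t4_sge_alloc_rxq() to forward interrupts through the
 * dedicated interrupt queue allocated for MSI/INTx operation (inferred from
 * this file; see t4_sge_alloc_rxq() for the authoritative details).
 */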

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image " FW_FNAME
			", error %d\n", ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
		ret = -EINVAL;      /* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
	    vers > adap->params.fw_vers) {
		ret = -t4_load_fw(adap, fw->data, fw->size);
		if (!ret)
			dev_info(dev, "firmware upgraded to version %pI4 from "
				 FW_FNAME "\n", &hdr->fw_ver);
	}
out:	release_firmware(fw);
	return ret;
}
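
/*
 * Note: %pI4 above is a reuse of the IPv4 address formatter -- fw_ver is a
 * big-endian 32-bit value, so %pI4 prints its four bytes as a dotted quad,
 * which happens to match the major.minor.micro.build version layout.
 */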

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxBroadcastFrames ",
	"TxMulticastFrames ",
	"TxUnicastFrames ",
	"TxErrorFrames ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"TxFramesDropped ",
	"TxPauseFrames ",
	"TxPPP0Frames ",
	"TxPPP1Frames ",
	"TxPPP2Frames ",
	"TxPPP3Frames ",
	"TxPPP4Frames ",
	"TxPPP5Frames ",
	"TxPPP6Frames ",
	"TxPPP7Frames ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxBroadcastFrames ",
	"RxMulticastFrames ",
	"RxUnicastFrames ",

	"RxFramesTooLong ",
	"RxJabberErrors ",
	"RxFCSErrors ",
	"RxLengthErrors ",
	"RxSymbolErrors ",
	"RxRuntFrames ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"RxPauseFrames ",
	"RxPPP0Frames ",
	"RxPPP1Frames ",
	"RxPPP2Frames ",
	"RxPPP3Frames ",
	"RxPPP4Frames ",
	"RxPPP5Frames ",
	"RxPPP6Frames ",
	"RxPPP7Frames ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc ",
	"RxBG1FramesTrunc ",
	"RxBG2FramesTrunc ",
	"RxBG3FramesTrunc ",

	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROpackets ",
	"GROmerged ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * Port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return 4 | (ap->params.rev << 10) | (1 << 16);
}
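
/*
 * For example, with chip revision 2 mk_adap_vers() returns
 * 4 | (2 << 10) | (1 << 16) = 0x10804: chip version 4 in bits 0..9,
 * revision 2 in bits 10..15, and register dump version 1 in bits 16..23.
 */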

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	int i;
	struct adapter *ap = netdev2adap(dev);

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, T4_REGMAP_SIZE);
	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_FIBRE;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static unsigned int speed_to_caps(int speed)
{
	if (speed == SPEED_100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == SPEED_1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == SPEED_10000)
		return FW_PORT_CAP_SPEED_10G;
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
		    (speed == SPEED_10000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
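
/*
 * Note: q->intr_params packs the counter-enable flag into bit 0 and the
 * timer index into the bits above it, which is why qtimer_val() recovers
 * the index with intr_params >> 1; index 6, one past the valid timers,
 * encodes "no timer".
 */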

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}

/**
 *	eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [31K-A..31K)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
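
/*
 * A quick check of the mapping above, with A = fn * sz and fn > 0:
 *   eeprom_ptov(0)        = 31744        (start of [31K..32K))
 *   eeprom_ptov(1024)     = 31744 - A    (start of [31K-A..31K))
 *   eeprom_ptov(1024 + A) = 0            (start of [0..ES-A-1K))
 */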

/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}

#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_RX;
	return err;
}
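
/*
 * Note: on failure cxgb_set_features() writes back dev->features with the
 * VLAN-extraction bit flipped, so the stack's view of the feature stays in
 * step with what the hardware is actually doing.
 */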

static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh_indir    = get_rss_table,
	.set_rxfh_indir    = set_rss_table,
	.flash_device      = set_flash,
};

/*
 * debugfs support
 */
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mem, pos, data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}
2043
2044static const struct file_operations mem_debugfs_fops = {
2045 .owner = THIS_MODULE,
234e3405 2046 .open = simple_open,
b8ff05a9 2047 .read = mem_read,
6038f373 2048 .llseek = default_llseek,
b8ff05a9
DM
2049};
2050
2051static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2052 unsigned int idx, unsigned int size_mb)
2053{
2054 struct dentry *de;
2055
2056 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2057 (void *)adap + idx, &mem_debugfs_fops);
2058 if (de && de->d_inode)
2059 de->d_inode->i_size = size_mb << 20;
2060}
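/*
 * Note on the "(void *)adap + idx" trick above: struct adapter is at least
 * 4-byte aligned and the MEM_* indices used here are all < 4, so the memory
 * target rides in the low two bits of file->private_data.  mem_read()
 * recovers both halves:
 *
 *	mem  = (uintptr_t)file->private_data & 3;	// target index
 *	adap = file->private_data - mem;		// adapter pointer
 */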
2061
2062static int __devinit setup_debugfs(struct adapter *adap)
2063{
2064 int i;
2065
2066 if (IS_ERR_OR_NULL(adap->debugfs_root))
2067 return -1;
2068
2069 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2070 if (i & EDRAM0_ENABLE)
2071 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2072 if (i & EDRAM1_ENABLE)
2073 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2074 if (i & EXT_MEM_ENABLE)
2075 add_debugfs_mem(adap, "mc", MEM_MC,
2076 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2077 if (adap->l2t)
2078 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2079 &t4_l2t_fops);
2080 return 0;
2081}
2082
2083/*
2084 * upper-layer driver support
2085 */
2086
2087/*
2088 * Allocate an active-open TID and associate it with the supplied caller data.
2089 */
2090int cxgb4_alloc_atid(struct tid_info *t, void *data)
2091{
2092 int atid = -1;
2093
2094 spin_lock_bh(&t->atid_lock);
2095 if (t->afree) {
2096 union aopen_entry *p = t->afree;
2097
2098 atid = p - t->atid_tab;
2099 t->afree = p->next;
2100 p->data = data;
2101 t->atids_in_use++;
2102 }
2103 spin_unlock_bh(&t->atid_lock);
2104 return atid;
2105}
2106EXPORT_SYMBOL(cxgb4_alloc_atid);
2107
2108/*
2109 * Release an active-open TID.
2110 */
2111void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2112{
2113 union aopen_entry *p = &t->atid_tab[atid];
2114
2115 spin_lock_bh(&t->atid_lock);
2116 p->next = t->afree;
2117 t->afree = p;
2118 t->atids_in_use--;
2119 spin_unlock_bh(&t->atid_lock);
2120}
2121EXPORT_SYMBOL(cxgb4_free_atid);
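/*
 * Usage sketch for the atid pair above (hypothetical ULD caller; "uld_ctx"
 * is illustrative, not a real symbol):
 *
 *	int atid = cxgb4_alloc_atid(&adap->tids, uld_ctx);
 *	if (atid < 0)
 *		return -ENOMEM;			// free list exhausted
 *	...					// embed atid in the open request
 *	cxgb4_free_atid(&adap->tids, atid);	// on failure or teardown
 */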
2122
2123/*
2124 * Allocate a server TID and associate it with the supplied caller data.
2125 */
2126int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2127{
2128 int stid;
2129
2130 spin_lock_bh(&t->stid_lock);
2131 if (family == PF_INET) {
2132 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2133 if (stid < t->nstids)
2134 __set_bit(stid, t->stid_bmap);
2135 else
2136 stid = -1;
2137 } else {
2138 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2139 if (stid < 0)
2140 stid = -1;
2141 }
2142 if (stid >= 0) {
2143 t->stid_tab[stid].data = data;
2144 stid += t->stid_base;
2145 t->stids_in_use++;
2146 }
2147 spin_unlock_bh(&t->stid_lock);
2148 return stid;
2149}
2150EXPORT_SYMBOL(cxgb4_alloc_stid);
2151
2152/*
2153 * Release a server TID.
2154 */
2155void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2156{
2157 stid -= t->stid_base;
2158 spin_lock_bh(&t->stid_lock);
2159 if (family == PF_INET)
2160 __clear_bit(stid, t->stid_bmap);
2161 else
2162 bitmap_release_region(t->stid_bmap, stid, 2);
2163 t->stid_tab[stid].data = NULL;
2164 t->stids_in_use--;
2165 spin_unlock_bh(&t->stid_lock);
2166}
2167EXPORT_SYMBOL(cxgb4_free_stid);
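/*
 * Note: a PF_INET server consumes a single bitmap slot, while any other
 * family (in practice IPv6) reserves an order-2 region, i.e. four
 * consecutive stids, via bitmap_find_free_region()/bitmap_release_region().
 * Callers must therefore pass the same family to cxgb4_free_stid() that
 * they used when allocating, or the bitmap accounting goes wrong.
 */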
2168
2169/*
2170 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2171 */
2172static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2173 unsigned int tid)
2174{
2175 struct cpl_tid_release *req;
2176
2177 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2178 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2179 INIT_TP_WR(req, tid);
2180 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2181}
2182
2183/*
2184 * Queue a TID release request and, if necessary, schedule the work-queue
2185 * task that processes it.
2186 */
31b9c19b 2187static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2188 unsigned int tid)
b8ff05a9
DM
2189{
2190 void **p = &t->tid_tab[tid];
2191 struct adapter *adap = container_of(t, struct adapter, tids);
2192
2193 spin_lock_bh(&adap->tid_release_lock);
2194 *p = adap->tid_release_head;
2195 /* Low 2 bits encode the Tx channel number */
2196 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2197 if (!adap->tid_release_task_busy) {
2198 adap->tid_release_task_busy = true;
2199 schedule_work(&adap->tid_release_task);
2200 }
2201 spin_unlock_bh(&adap->tid_release_lock);
2202}
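/*
 * The deferred-release list above uses the same low-bit tagging trick as
 * the debugfs memory handles: tid_tab entries are pointer-aligned, so the
 * Tx channel (0-3) fits in the low two bits of the list pointer:
 *
 *	head = (void **)((uintptr_t)p | chan);	// encode (here)
 *	chan = (uintptr_t)head & 3;		// decode (in the work task)
 *	p    = (void *)head - chan;
 */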
b8ff05a9
DM
2203
2204/*
2205 * Process the list of pending TID release requests.
2206 */
2207static void process_tid_release_list(struct work_struct *work)
2208{
2209 struct sk_buff *skb;
2210 struct adapter *adap;
2211
2212 adap = container_of(work, struct adapter, tid_release_task);
2213
2214 spin_lock_bh(&adap->tid_release_lock);
2215 while (adap->tid_release_head) {
2216 void **p = adap->tid_release_head;
2217 unsigned int chan = (uintptr_t)p & 3;
2218 p = (void *)p - chan;
2219
2220 adap->tid_release_head = *p;
2221 *p = NULL;
2222 spin_unlock_bh(&adap->tid_release_lock);
2223
2224 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2225 GFP_KERNEL)))
2226 schedule_timeout_uninterruptible(1);
2227
2228 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2229 t4_ofld_send(adap, skb);
2230 spin_lock_bh(&adap->tid_release_lock);
2231 }
2232 adap->tid_release_task_busy = false;
2233 spin_unlock_bh(&adap->tid_release_lock);
2234}
2235
2236/*
2237 * Release a TID and inform HW. If we are unable to allocate the release
2238 * message, we defer the release to a work queue.
2239 */
2240void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2241{
2242 void *old;
2243 struct sk_buff *skb;
2244 struct adapter *adap = container_of(t, struct adapter, tids);
2245
2246 old = t->tid_tab[tid];
2247 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2248 if (likely(skb)) {
2249 t->tid_tab[tid] = NULL;
2250 mk_tid_release(skb, chan, tid);
2251 t4_ofld_send(adap, skb);
2252 } else
2253 cxgb4_queue_tid_release(t, chan, tid);
2254 if (old)
2255 atomic_dec(&t->tids_in_use);
2256}
2257EXPORT_SYMBOL(cxgb4_remove_tid);
2258
2259/*
2260 * Allocate and initialize the TID tables. Returns 0 on success.
2261 */
2262static int tid_init(struct tid_info *t)
2263{
2264 size_t size;
2265 unsigned int natids = t->natids;
2266
2267 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2268 t->nstids * sizeof(*t->stid_tab) +
2269 BITS_TO_LONGS(t->nstids) * sizeof(long);
2270 t->tid_tab = t4_alloc_mem(size);
2271 if (!t->tid_tab)
2272 return -ENOMEM;
2273
2274 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2275 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2276 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2277 spin_lock_init(&t->stid_lock);
2278 spin_lock_init(&t->atid_lock);
2279
2280 t->stids_in_use = 0;
2281 t->afree = NULL;
2282 t->atids_in_use = 0;
2283 atomic_set(&t->tids_in_use, 0);
2284
2285 /* Set up the free list for atid_tab and clear the stid bitmap. */
2286 if (natids) {
2287 while (--natids)
2288 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2289 t->afree = t->atid_tab;
2290 }
2291 bitmap_zero(t->stid_bmap, t->nstids);
2292 return 0;
2293}
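/*
 * Layout of the single t4_alloc_mem() block carved up above (sizes in
 * entries, not bytes):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids] | stid_bmap
 *
 * so one allocation, freed via tids.tid_tab, backs all four tables.
 */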
2294
2295/**
2296 * cxgb4_create_server - create an IP server
2297 * @dev: the device
2298 * @stid: the server TID
2299 * @sip: local IP address to bind server to
2300 * @sport: the server's TCP port
2301 * @queue: queue to direct messages from this server to
2302 *
2303 * Create an IP server for the given port and address.
2304 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2305 */
2306int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2307 __be32 sip, __be16 sport, unsigned int queue)
2308{
2309 unsigned int chan;
2310 struct sk_buff *skb;
2311 struct adapter *adap;
2312 struct cpl_pass_open_req *req;
2313
2314 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2315 if (!skb)
2316 return -ENOMEM;
2317
2318 adap = netdev2adap(dev);
2319 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2320 INIT_TP_WR(req, 0);
2321 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2322 req->local_port = sport;
2323 req->peer_port = htons(0);
2324 req->local_ip = sip;
2325 req->peer_ip = htonl(0);
e46dab4d 2326 chan = rxq_to_chan(&adap->sge, queue);
b8ff05a9
DM
2327 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2328 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2329 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2330 return t4_mgmt_tx(adap, skb);
2331}
2332EXPORT_SYMBOL(cxgb4_create_server);
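/*
 * Usage sketch (hypothetical ULD caller; "uld_ctx", "local_ip", "port" and
 * "rxq" are illustrative, error handling elided):
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, uld_ctx);
 *	if (stid >= 0)
 *		cxgb4_create_server(dev, stid, local_ip, htons(port), rxq);
 *
 * The firmware directs SYNs for this server to the Rx queue selected via
 * SYN_RSS_QUEUE(queue).
 */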
2333
b8ff05a9
DM
2334/**
2335 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2336 * @mtus: the HW MTU table
2337 * @mtu: the target MTU
2338 * @idx: index of selected entry in the MTU table
2339 *
2340 * Returns the index and the value in the HW MTU table that is closest to
2341 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2342 * table, in which case that smallest available value is selected.
2343 */
2344unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2345 unsigned int *idx)
2346{
2347 unsigned int i = 0;
2348
2349 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2350 ++i;
2351 if (idx)
2352 *idx = i;
2353 return mtus[i];
2354}
2355EXPORT_SYMBOL(cxgb4_best_mtu);
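/*
 * Worked example (table contents illustrative): with
 * mtus[] = { 88, 256, 576, 1500, 9000 } and mtu = 1400 the loop stops at
 * i = 2 because mtus[3] == 1500 exceeds 1400, so the function returns 576
 * with *idx = 2.  With mtu = 64, below every entry, it returns mtus[0] = 88.
 */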
2356
2357/**
2358 * cxgb4_port_chan - get the HW channel of a port
2359 * @dev: the net device for the port
2360 *
2361 * Return the HW Tx channel of the given port.
2362 */
2363unsigned int cxgb4_port_chan(const struct net_device *dev)
2364{
2365 return netdev2pinfo(dev)->tx_chan;
2366}
2367EXPORT_SYMBOL(cxgb4_port_chan);
2368
881806bc
VP
2369unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2370{
2371 struct adapter *adap = netdev2adap(dev);
2372 u32 v;
2373
2374 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2375 return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
2376}
2377EXPORT_SYMBOL(cxgb4_dbfifo_count);
2378
b8ff05a9
DM
2379/**
2380 * cxgb4_port_viid - get the VI id of a port
2381 * @dev: the net device for the port
2382 *
2383 * Return the VI id of the given port.
2384 */
2385unsigned int cxgb4_port_viid(const struct net_device *dev)
2386{
2387 return netdev2pinfo(dev)->viid;
2388}
2389EXPORT_SYMBOL(cxgb4_port_viid);
2390
2391/**
2392 * cxgb4_port_idx - get the index of a port
2393 * @dev: the net device for the port
2394 *
2395 * Return the index of the given port.
2396 */
2397unsigned int cxgb4_port_idx(const struct net_device *dev)
2398{
2399 return netdev2pinfo(dev)->port_id;
2400}
2401EXPORT_SYMBOL(cxgb4_port_idx);
2402
b8ff05a9
DM
2403void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2404 struct tp_tcp_stats *v6)
2405{
2406 struct adapter *adap = pci_get_drvdata(pdev);
2407
2408 spin_lock(&adap->stats_lock);
2409 t4_tp_get_tcp_stats(adap, v4, v6);
2410 spin_unlock(&adap->stats_lock);
2411}
2412EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2413
2414void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2415 const unsigned int *pgsz_order)
2416{
2417 struct adapter *adap = netdev2adap(dev);
2418
2419 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2420 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2421 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2422 HPZ3(pgsz_order[3]));
2423}
2424EXPORT_SYMBOL(cxgb4_iscsi_init);
2425
2426static struct pci_driver cxgb4_driver;
2427
2428static void check_neigh_update(struct neighbour *neigh)
2429{
2430 const struct device *parent;
2431 const struct net_device *netdev = neigh->dev;
2432
2433 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2434 netdev = vlan_dev_real_dev(netdev);
2435 parent = netdev->dev.parent;
2436 if (parent && parent->driver == &cxgb4_driver.driver)
2437 t4_l2t_update(dev_get_drvdata(parent), neigh);
2438}
2439
2440static int netevent_cb(struct notifier_block *nb, unsigned long event,
2441 void *data)
2442{
2443 switch (event) {
2444 case NETEVENT_NEIGH_UPDATE:
2445 check_neigh_update(data);
2446 break;
b8ff05a9
DM
2447 case NETEVENT_REDIRECT:
2448 default:
2449 break;
2450 }
2451 return 0;
2452}
2453
2454static bool netevent_registered;
2455static struct notifier_block cxgb4_netevent_nb = {
2456 .notifier_call = netevent_cb
2457};
2458
881806bc
VP
2459static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2460{
2461 mutex_lock(&uld_mutex);
2462 if (adap->uld_handle[CXGB4_ULD_RDMA])
2463 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2464 cmd);
2465 mutex_unlock(&uld_mutex);
2466}
2467
2468static void process_db_full(struct work_struct *work)
2469{
2470 struct adapter *adap;
2471 static int delay = 1000;
2472 u32 v;
2473
2474 adap = container_of(work, struct adapter, db_full_task);
2475
2476
2477 /* stop LLD queues */
2478
2479 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2480 do {
2481 set_current_state(TASK_UNINTERRUPTIBLE);
2482 schedule_timeout(usecs_to_jiffies(delay));
2483 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2484 if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
2485 break;
2486 } while (1);
2487 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2488
2489
2490 /*
2491 * The more DB-full interrupts we get, the longer we'll delay before
2492 * re-enabling the doorbells on the queues, capped at 200ms.
2493 */
2494 delay = min(delay << 1, 200000);
2495
2496 /* resume LLD queues */
2497}
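/*
 * Delay progression sketch: "delay" is static, so it persists across
 * invocations and doubles on every DB-full event (1000, 2000, 4000, ... us)
 * until it saturates at the 200000us (200ms) cap.
 */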
2498
2499static void process_db_drop(struct work_struct *work)
2500{
2501 struct adapter *adap;
2502 adap = container_of(work, struct adapter, db_drop_task);
2503
2504
2505 /*
2506 * sync the PIDX values in HW and SW for LLD queues.
2507 */
2508
2509 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2510}
2511
2512void t4_db_full(struct adapter *adap)
2513{
2514 schedule_work(&adap->db_full_task);
2515}
2516
2517void t4_db_dropped(struct adapter *adap)
2518{
2519 schedule_work(&adap->db_drop_task);
2520}
2521
b8ff05a9
DM
2522static void uld_attach(struct adapter *adap, unsigned int uld)
2523{
2524 void *handle;
2525 struct cxgb4_lld_info lli;
2526
2527 lli.pdev = adap->pdev;
2528 lli.l2t = adap->l2t;
2529 lli.tids = &adap->tids;
2530 lli.ports = adap->port;
2531 lli.vr = &adap->vres;
2532 lli.mtus = adap->params.mtus;
2533 if (uld == CXGB4_ULD_RDMA) {
2534 lli.rxq_ids = adap->sge.rdma_rxq;
2535 lli.nrxq = adap->sge.rdmaqs;
2536 } else if (uld == CXGB4_ULD_ISCSI) {
2537 lli.rxq_ids = adap->sge.ofld_rxq;
2538 lli.nrxq = adap->sge.ofldqsets;
2539 }
2540 lli.ntxq = adap->sge.ofldqsets;
2541 lli.nchan = adap->params.nports;
2542 lli.nports = adap->params.nports;
2543 lli.wr_cred = adap->params.ofldq_wr_cred;
2544 lli.adapter_type = adap->params.rev;
2545 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2546 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
2547 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2548 (adap->fn * 4));
b8ff05a9 2549 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
2550 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2551 (adap->fn * 4));
b8ff05a9
DM
2552 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2553 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2554 lli.fw_vers = adap->params.fw_vers;
2555
2556 handle = ulds[uld].add(&lli);
2557 if (IS_ERR(handle)) {
2558 dev_warn(adap->pdev_dev,
2559 "could not attach to the %s driver, error %ld\n",
2560 uld_str[uld], PTR_ERR(handle));
2561 return;
2562 }
2563
2564 adap->uld_handle[uld] = handle;
2565
2566 if (!netevent_registered) {
2567 register_netevent_notifier(&cxgb4_netevent_nb);
2568 netevent_registered = true;
2569 }
e29f5dbc
DM
2570
2571 if (adap->flags & FULL_INIT_DONE)
2572 ulds[uld].state_change(handle, CXGB4_STATE_UP);
b8ff05a9
DM
2573}
2574
2575static void attach_ulds(struct adapter *adap)
2576{
2577 unsigned int i;
2578
2579 mutex_lock(&uld_mutex);
2580 list_add_tail(&adap->list_node, &adapter_list);
2581 for (i = 0; i < CXGB4_ULD_MAX; i++)
2582 if (ulds[i].add)
2583 uld_attach(adap, i);
2584 mutex_unlock(&uld_mutex);
2585}
2586
2587static void detach_ulds(struct adapter *adap)
2588{
2589 unsigned int i;
2590
2591 mutex_lock(&uld_mutex);
2592 list_del(&adap->list_node);
2593 for (i = 0; i < CXGB4_ULD_MAX; i++)
2594 if (adap->uld_handle[i]) {
2595 ulds[i].state_change(adap->uld_handle[i],
2596 CXGB4_STATE_DETACH);
2597 adap->uld_handle[i] = NULL;
2598 }
2599 if (netevent_registered && list_empty(&adapter_list)) {
2600 unregister_netevent_notifier(&cxgb4_netevent_nb);
2601 netevent_registered = false;
2602 }
2603 mutex_unlock(&uld_mutex);
2604}
2605
2606static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2607{
2608 unsigned int i;
2609
2610 mutex_lock(&uld_mutex);
2611 for (i = 0; i < CXGB4_ULD_MAX; i++)
2612 if (adap->uld_handle[i])
2613 ulds[i].state_change(adap->uld_handle[i], new_state);
2614 mutex_unlock(&uld_mutex);
2615}
2616
2617/**
2618 * cxgb4_register_uld - register an upper-layer driver
2619 * @type: the ULD type
2620 * @p: the ULD methods
2621 *
2622 * Registers an upper-layer driver with this driver and notifies the ULD
2623 * about any presently available devices that support its type. Returns
2624 * %-EBUSY if a ULD of the same type is already registered.
2625 */
2626int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2627{
2628 int ret = 0;
2629 struct adapter *adap;
2630
2631 if (type >= CXGB4_ULD_MAX)
2632 return -EINVAL;
2633 mutex_lock(&uld_mutex);
2634 if (ulds[type].add) {
2635 ret = -EBUSY;
2636 goto out;
2637 }
2638 ulds[type] = *p;
2639 list_for_each_entry(adap, &adapter_list, list_node)
2640 uld_attach(adap, type);
2641out: mutex_unlock(&uld_mutex);
2642 return ret;
2643}
2644EXPORT_SYMBOL(cxgb4_register_uld);
2645
2646/**
2647 * cxgb4_unregister_uld - unregister an upper-layer driver
2648 * @type: the ULD type
2649 *
2650 * Unregisters an existing upper-layer driver.
2651 */
2652int cxgb4_unregister_uld(enum cxgb4_uld type)
2653{
2654 struct adapter *adap;
2655
2656 if (type >= CXGB4_ULD_MAX)
2657 return -EINVAL;
2658 mutex_lock(&uld_mutex);
2659 list_for_each_entry(adap, &adapter_list, list_node)
2660 adap->uld_handle[type] = NULL;
2661 ulds[type].add = NULL;
2662 mutex_unlock(&uld_mutex);
2663 return 0;
2664}
2665EXPORT_SYMBOL(cxgb4_unregister_uld);
2666
2667/**
2668 * cxgb_up - enable the adapter
2669 * @adap: adapter being enabled
2670 *
2671 * Called when the first port is enabled, this function performs the
2672 * actions necessary to make an adapter operational, such as completing
2673 * the initialization of HW modules, and enabling interrupts.
2674 *
2675 * Must be called with the rtnl lock held.
2676 */
2677static int cxgb_up(struct adapter *adap)
2678{
aaefae9b 2679 int err;
b8ff05a9 2680
aaefae9b
DM
2681 err = setup_sge_queues(adap);
2682 if (err)
2683 goto out;
2684 err = setup_rss(adap);
2685 if (err)
2686 goto freeq;
b8ff05a9
DM
2687
2688 if (adap->flags & USING_MSIX) {
aaefae9b 2689 name_msix_vecs(adap);
b8ff05a9
DM
2690 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2691 adap->msix_info[0].desc, adap);
2692 if (err)
2693 goto irq_err;
2694
2695 err = request_msix_queue_irqs(adap);
2696 if (err) {
2697 free_irq(adap->msix_info[0].vec, adap);
2698 goto irq_err;
2699 }
2700 } else {
2701 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2702 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
b1a3c2b6 2703 adap->port[0]->name, adap);
b8ff05a9
DM
2704 if (err)
2705 goto irq_err;
2706 }
2707 enable_rx(adap);
2708 t4_sge_start(adap);
2709 t4_intr_enable(adap);
aaefae9b 2710 adap->flags |= FULL_INIT_DONE;
b8ff05a9
DM
2711 notify_ulds(adap, CXGB4_STATE_UP);
2712 out:
2713 return err;
2714 irq_err:
2715 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
aaefae9b
DM
2716 freeq:
2717 t4_free_sge_resources(adap);
b8ff05a9
DM
2718 goto out;
2719}
2720
2721static void cxgb_down(struct adapter *adapter)
2722{
2723 t4_intr_disable(adapter);
2724 cancel_work_sync(&adapter->tid_release_task);
881806bc
VP
2725 cancel_work_sync(&adapter->db_full_task);
2726 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 2727 adapter->tid_release_task_busy = false;
204dc3c0 2728 adapter->tid_release_head = NULL;
b8ff05a9
DM
2729
2730 if (adapter->flags & USING_MSIX) {
2731 free_msix_queue_irqs(adapter);
2732 free_irq(adapter->msix_info[0].vec, adapter);
2733 } else
2734 free_irq(adapter->pdev->irq, adapter);
2735 quiesce_rx(adapter);
aaefae9b
DM
2736 t4_sge_stop(adapter);
2737 t4_free_sge_resources(adapter);
2738 adapter->flags &= ~FULL_INIT_DONE;
b8ff05a9
DM
2739}
2740
2741/*
2742 * net_device operations
2743 */
2744static int cxgb_open(struct net_device *dev)
2745{
2746 int err;
2747 struct port_info *pi = netdev_priv(dev);
2748 struct adapter *adapter = pi->adapter;
2749
6a3c869a
DM
2750 netif_carrier_off(dev);
2751
aaefae9b
DM
2752 if (!(adapter->flags & FULL_INIT_DONE)) {
2753 err = cxgb_up(adapter);
2754 if (err < 0)
2755 return err;
2756 }
b8ff05a9 2757
f68707b8
DM
2758 err = link_start(dev);
2759 if (!err)
2760 netif_tx_start_all_queues(dev);
2761 return err;
b8ff05a9
DM
2762}
2763
2764static int cxgb_close(struct net_device *dev)
2765{
b8ff05a9
DM
2766 struct port_info *pi = netdev_priv(dev);
2767 struct adapter *adapter = pi->adapter;
2768
2769 netif_tx_stop_all_queues(dev);
2770 netif_carrier_off(dev);
060e0c75 2771 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
b8ff05a9
DM
2772}
2773
f5152c90
DM
2774static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2775 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
2776{
2777 struct port_stats stats;
2778 struct port_info *p = netdev_priv(dev);
2779 struct adapter *adapter = p->adapter;
b8ff05a9
DM
2780
2781 spin_lock(&adapter->stats_lock);
2782 t4_get_port_stats(adapter, p->tx_chan, &stats);
2783 spin_unlock(&adapter->stats_lock);
2784
2785 ns->tx_bytes = stats.tx_octets;
2786 ns->tx_packets = stats.tx_frames;
2787 ns->rx_bytes = stats.rx_octets;
2788 ns->rx_packets = stats.rx_frames;
2789 ns->multicast = stats.rx_mcast_frames;
2790
2791 /* detailed rx_errors */
2792 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2793 stats.rx_runt;
2794 ns->rx_over_errors = 0;
2795 ns->rx_crc_errors = stats.rx_fcs_err;
2796 ns->rx_frame_errors = stats.rx_symbol_err;
2797 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2798 stats.rx_ovflow2 + stats.rx_ovflow3 +
2799 stats.rx_trunc0 + stats.rx_trunc1 +
2800 stats.rx_trunc2 + stats.rx_trunc3;
2801 ns->rx_missed_errors = 0;
2802
2803 /* detailed tx_errors */
2804 ns->tx_aborted_errors = 0;
2805 ns->tx_carrier_errors = 0;
2806 ns->tx_fifo_errors = 0;
2807 ns->tx_heartbeat_errors = 0;
2808 ns->tx_window_errors = 0;
2809
2810 ns->tx_errors = stats.tx_error_frames;
2811 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2812 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2813 return ns;
2814}
2815
2816static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2817{
060e0c75 2818 unsigned int mbox;
b8ff05a9
DM
2819 int ret = 0, prtad, devad;
2820 struct port_info *pi = netdev_priv(dev);
2821 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2822
2823 switch (cmd) {
2824 case SIOCGMIIPHY:
2825 if (pi->mdio_addr < 0)
2826 return -EOPNOTSUPP;
2827 data->phy_id = pi->mdio_addr;
2828 break;
2829 case SIOCGMIIREG:
2830 case SIOCSMIIREG:
2831 if (mdio_phy_id_is_c45(data->phy_id)) {
2832 prtad = mdio_phy_id_prtad(data->phy_id);
2833 devad = mdio_phy_id_devad(data->phy_id);
2834 } else if (data->phy_id < 32) {
2835 prtad = data->phy_id;
2836 devad = 0;
2837 data->reg_num &= 0x1f;
2838 } else
2839 return -EINVAL;
2840
060e0c75 2841 mbox = pi->adapter->fn;
b8ff05a9 2842 if (cmd == SIOCGMIIREG)
060e0c75 2843 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
2844 data->reg_num, &data->val_out);
2845 else
060e0c75 2846 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
2847 data->reg_num, data->val_in);
2848 break;
2849 default:
2850 return -EOPNOTSUPP;
2851 }
2852 return ret;
2853}
2854
2855static void cxgb_set_rxmode(struct net_device *dev)
2856{
2857 /* unfortunately we can't return errors to the stack */
2858 set_rxmode(dev, -1, false);
2859}
2860
2861static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2862{
2863 int ret;
2864 struct port_info *pi = netdev_priv(dev);
2865
2866 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2867 return -EINVAL;
060e0c75
DM
2868 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2869 -1, -1, -1, true);
b8ff05a9
DM
2870 if (!ret)
2871 dev->mtu = new_mtu;
2872 return ret;
2873}
2874
2875static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2876{
2877 int ret;
2878 struct sockaddr *addr = p;
2879 struct port_info *pi = netdev_priv(dev);
2880
2881 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 2882 return -EADDRNOTAVAIL;
b8ff05a9 2883
060e0c75
DM
2884 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2885 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
2886 if (ret < 0)
2887 return ret;
2888
2889 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2890 pi->xact_addr_filt = ret;
2891 return 0;
2892}
2893
b8ff05a9
DM
2894#ifdef CONFIG_NET_POLL_CONTROLLER
2895static void cxgb_netpoll(struct net_device *dev)
2896{
2897 struct port_info *pi = netdev_priv(dev);
2898 struct adapter *adap = pi->adapter;
2899
2900 if (adap->flags & USING_MSIX) {
2901 int i;
2902 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2903
2904 for (i = pi->nqsets; i; i--, rx++)
2905 t4_sge_intr_msix(0, &rx->rspq);
2906 } else
2907 t4_intr_handler(adap)(0, adap);
2908}
2909#endif
2910
2911static const struct net_device_ops cxgb4_netdev_ops = {
2912 .ndo_open = cxgb_open,
2913 .ndo_stop = cxgb_close,
2914 .ndo_start_xmit = t4_eth_xmit,
9be793bf 2915 .ndo_get_stats64 = cxgb_get_stats,
b8ff05a9
DM
2916 .ndo_set_rx_mode = cxgb_set_rxmode,
2917 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 2918 .ndo_set_features = cxgb_set_features,
b8ff05a9
DM
2919 .ndo_validate_addr = eth_validate_addr,
2920 .ndo_do_ioctl = cxgb_ioctl,
2921 .ndo_change_mtu = cxgb_change_mtu,
b8ff05a9
DM
2922#ifdef CONFIG_NET_POLL_CONTROLLER
2923 .ndo_poll_controller = cxgb_netpoll,
2924#endif
2925};
2926
2927void t4_fatal_err(struct adapter *adap)
2928{
2929 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2930 t4_intr_disable(adap);
2931 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2932}
2933
2934static void setup_memwin(struct adapter *adap)
2935{
2936 u32 bar0;
2937
2938 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
2939 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2940 (bar0 + MEMWIN0_BASE) | BIR(0) |
2941 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2942 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2943 (bar0 + MEMWIN1_BASE) | BIR(0) |
2944 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2945 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2946 (bar0 + MEMWIN2_BASE) | BIR(0) |
2947 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1ae970e0
DM
2948 if (adap->vres.ocq.size) {
2949 unsigned int start, sz_kb;
2950
2951 start = pci_resource_start(adap->pdev, 2) +
2952 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2953 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2954 t4_write_reg(adap,
2955 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
2956 start | BIR(1) | WINDOW(ilog2(sz_kb)));
2957 t4_write_reg(adap,
2958 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
2959 adap->vres.ocq.start);
2960 t4_read_reg(adap,
2961 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
2962 }
b8ff05a9
DM
2963}
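/*
 * Note on the WINDOW() encoding above: the field holds log2 of the window
 * aperture in KB, hence ilog2(size) - 10.  As an illustrative example, a
 * 64KB aperture would encode as ilog2(65536) - 10 = 6.  The OCQ window
 * rounds its size up to a power of two first so the same encoding applies.
 */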
2964
02b5fb8e
DM
2965static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2966{
2967 u32 v;
2968 int ret;
2969
2970 /* get device capabilities */
2971 memset(c, 0, sizeof(*c));
2972 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2973 FW_CMD_REQUEST | FW_CMD_READ);
2974 c->retval_len16 = htonl(FW_LEN16(*c));
060e0c75 2975 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
02b5fb8e
DM
2976 if (ret < 0)
2977 return ret;
2978
2979 /* select capabilities we'll be using */
2980 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2981 if (!vf_acls)
2982 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2983 else
2984 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2985 } else if (vf_acls) {
2986 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2987 return ret;
2988 }
2989 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2990 FW_CMD_REQUEST | FW_CMD_WRITE);
060e0c75 2991 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
02b5fb8e
DM
2992 if (ret < 0)
2993 return ret;
2994
060e0c75 2995 ret = t4_config_glbl_rss(adap, adap->fn,
02b5fb8e
DM
2996 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2997 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2998 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2999 if (ret < 0)
3000 return ret;
3001
060e0c75
DM
3002 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
3003 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
02b5fb8e
DM
3004 if (ret < 0)
3005 return ret;
3006
3007 t4_sge_init(adap);
3008
02b5fb8e
DM
3009 /* tweak some settings */
3010 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
3011 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
3012 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
3013 v = t4_read_reg(adap, TP_PIO_DATA);
3014 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
060e0c75
DM
3015
3016 /* get basic stuff going */
3017 return t4_early_init(adap, adap->fn);
02b5fb8e
DM
3018}
3019
b8ff05a9
DM
3020/*
3021 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3022 */
3023#define MAX_ATIDS 8192U
3024
3025/*
3026 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3027 */
3028static int adap_init0(struct adapter *adap)
3029{
3030 int ret;
3031 u32 v, port_vec;
3032 enum dev_state state;
3033 u32 params[7], val[7];
3034 struct fw_caps_config_cmd c;
3035
3036 ret = t4_check_fw_version(adap);
3037 if (ret == -EINVAL || ret > 0) {
3038 if (upgrade_fw(adap) >= 0) /* recache FW version */
3039 ret = t4_check_fw_version(adap);
3040 }
3041 if (ret < 0)
3042 return ret;
3043
3044 /* contact FW, request master */
060e0c75 3045 ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
b8ff05a9
DM
3046 if (ret < 0) {
3047 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3048 ret);
3049 return ret;
3050 }
3051
3052 /* reset device */
060e0c75 3053 ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
b8ff05a9
DM
3054 if (ret < 0)
3055 goto bye;
3056
b8ff05a9
DM
3057 for (v = 0; v < SGE_NTIMERS - 1; v++)
3058 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
3059 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3060 adap->sge.counter_val[0] = 1;
3061 for (v = 1; v < SGE_NCOUNTERS; v++)
3062 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
3063 THRESHOLD_3_MASK);
b8ff05a9
DM
3064#define FW_PARAM_DEV(param) \
3065 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3066 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3067
a0881cab 3068 params[0] = FW_PARAM_DEV(CCLK);
060e0c75 3069 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
a0881cab
DM
3070 if (ret < 0)
3071 goto bye;
3072 adap->params.vpd.cclk = val[0];
3073
3074 ret = adap_init1(adap, &c);
3075 if (ret < 0)
3076 goto bye;
3077
b8ff05a9
DM
3078#define FW_PARAM_PFVF(param) \
3079 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
060e0c75
DM
3080 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
3081 FW_PARAMS_PARAM_Y(adap->fn))
b8ff05a9
DM
3082
3083 params[0] = FW_PARAM_DEV(PORTVEC);
3084 params[1] = FW_PARAM_PFVF(L2T_START);
3085 params[2] = FW_PARAM_PFVF(L2T_END);
3086 params[3] = FW_PARAM_PFVF(FILTER_START);
3087 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d
DM
3088 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3089 params[6] = FW_PARAM_PFVF(EQ_START);
3090 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
b8ff05a9
DM
3091 if (ret < 0)
3092 goto bye;
3093 port_vec = val[0];
3094 adap->tids.ftid_base = val[3];
3095 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d
DM
3096 adap->sge.ingr_start = val[5];
3097 adap->sge.egr_start = val[6];
b8ff05a9
DM
3098
3099 if (c.ofldcaps) {
3100 /* query offload-related parameters */
3101 params[0] = FW_PARAM_DEV(NTID);
3102 params[1] = FW_PARAM_PFVF(SERVER_START);
3103 params[2] = FW_PARAM_PFVF(SERVER_END);
3104 params[3] = FW_PARAM_PFVF(TDDP_START);
3105 params[4] = FW_PARAM_PFVF(TDDP_END);
3106 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
060e0c75
DM
3107 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3108 val);
b8ff05a9
DM
3109 if (ret < 0)
3110 goto bye;
3111 adap->tids.ntids = val[0];
3112 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3113 adap->tids.stid_base = val[1];
3114 adap->tids.nstids = val[2] - val[1] + 1;
3115 adap->vres.ddp.start = val[3];
3116 adap->vres.ddp.size = val[4] - val[3] + 1;
3117 adap->params.ofldq_wr_cred = val[5];
3118 adap->params.offload = 1;
3119 }
3120 if (c.rdmacaps) {
3121 params[0] = FW_PARAM_PFVF(STAG_START);
3122 params[1] = FW_PARAM_PFVF(STAG_END);
3123 params[2] = FW_PARAM_PFVF(RQ_START);
3124 params[3] = FW_PARAM_PFVF(RQ_END);
3125 params[4] = FW_PARAM_PFVF(PBL_START);
3126 params[5] = FW_PARAM_PFVF(PBL_END);
060e0c75
DM
3127 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3128 val);
b8ff05a9
DM
3129 if (ret < 0)
3130 goto bye;
3131 adap->vres.stag.start = val[0];
3132 adap->vres.stag.size = val[1] - val[0] + 1;
3133 adap->vres.rq.start = val[2];
3134 adap->vres.rq.size = val[3] - val[2] + 1;
3135 adap->vres.pbl.start = val[4];
3136 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
3137
3138 params[0] = FW_PARAM_PFVF(SQRQ_START);
3139 params[1] = FW_PARAM_PFVF(SQRQ_END);
3140 params[2] = FW_PARAM_PFVF(CQ_START);
3141 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
3142 params[4] = FW_PARAM_PFVF(OCQ_START);
3143 params[5] = FW_PARAM_PFVF(OCQ_END);
060e0c75
DM
3144 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3145 val);
a0881cab
DM
3146 if (ret < 0)
3147 goto bye;
3148 adap->vres.qp.start = val[0];
3149 adap->vres.qp.size = val[1] - val[0] + 1;
3150 adap->vres.cq.start = val[2];
3151 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
3152 adap->vres.ocq.start = val[4];
3153 adap->vres.ocq.size = val[5] - val[4] + 1;
b8ff05a9
DM
3154 }
3155 if (c.iscsicaps) {
3156 params[0] = FW_PARAM_PFVF(ISCSI_START);
3157 params[1] = FW_PARAM_PFVF(ISCSI_END);
060e0c75
DM
3158 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
3159 val);
b8ff05a9
DM
3160 if (ret < 0)
3161 goto bye;
3162 adap->vres.iscsi.start = val[0];
3163 adap->vres.iscsi.size = val[1] - val[0] + 1;
3164 }
3165#undef FW_PARAM_PFVF
3166#undef FW_PARAM_DEV
3167
3168 adap->params.nports = hweight32(port_vec);
3169 adap->params.portvec = port_vec;
3170 adap->flags |= FW_OK;
3171
3172 /* These are finalized by FW initialization, load their values now */
3173 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3174 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3175 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3176 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3177 adap->params.b_wnd);
7ee9ff94
CL
3178
3179#ifdef CONFIG_PCI_IOV
3180 /*
3181 * Provision resource limits for Virtual Functions. We currently
3182 * grant them all the same static resource limits except for the Port
3183 * Access Rights Mask, which we assign based on the PF. All of the
3184 * static provisioning for both the PF and VFs really needs to be
3185 * managed in a persistent manner for each device that the firmware
3186 * controls.
3187 */
3188 {
3189 int pf, vf;
3190
3191 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3192 if (num_vf[pf] <= 0)
3193 continue;
3194
3195 /* VF numbering starts at 1! */
3196 for (vf = 1; vf <= num_vf[pf]; vf++) {
060e0c75 3197 ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
7ee9ff94
CL
3198 VFRES_NEQ, VFRES_NETHCTRL,
3199 VFRES_NIQFLINT, VFRES_NIQ,
3200 VFRES_TC, VFRES_NVI,
3201 FW_PFVF_CMD_CMASK_MASK,
3202 pfvfres_pmask(adap, pf, vf),
3203 VFRES_NEXACTF,
3204 VFRES_R_CAPS, VFRES_WX_CAPS);
3205 if (ret < 0)
3206 dev_warn(adap->pdev_dev, "failed to "
3207 "provision pf/vf=%d/%d; "
3208 "err=%d\n", pf, vf, ret);
3209 }
3210 }
3211 }
3212#endif
3213
1ae970e0 3214 setup_memwin(adap);
b8ff05a9
DM
3215 return 0;
3216
3217 /*
3218 * If a command timed out or failed with EIO, the FW is not operating
3219 * within its spec or something catastrophic happened to the HW/FW; stop
3220 * issuing commands.
3221 */
3222bye: if (ret != -ETIMEDOUT && ret != -EIO)
060e0c75 3223 t4_fw_bye(adap, adap->fn);
b8ff05a9
DM
3224 return ret;
3225}
3226
204dc3c0
DM
3227/* EEH callbacks */
3228
3229static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3230 pci_channel_state_t state)
3231{
3232 int i;
3233 struct adapter *adap = pci_get_drvdata(pdev);
3234
3235 if (!adap)
3236 goto out;
3237
3238 rtnl_lock();
3239 adap->flags &= ~FW_OK;
3240 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3241 for_each_port(adap, i) {
3242 struct net_device *dev = adap->port[i];
3243
3244 netif_device_detach(dev);
3245 netif_carrier_off(dev);
3246 }
3247 if (adap->flags & FULL_INIT_DONE)
3248 cxgb_down(adap);
3249 rtnl_unlock();
3250 pci_disable_device(pdev);
3251out: return state == pci_channel_io_perm_failure ?
3252 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3253}
3254
3255static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3256{
3257 int i, ret;
3258 struct fw_caps_config_cmd c;
3259 struct adapter *adap = pci_get_drvdata(pdev);
3260
3261 if (!adap) {
3262 pci_restore_state(pdev);
3263 pci_save_state(pdev);
3264 return PCI_ERS_RESULT_RECOVERED;
3265 }
3266
3267 if (pci_enable_device(pdev)) {
3268 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3269 return PCI_ERS_RESULT_DISCONNECT;
3270 }
3271
3272 pci_set_master(pdev);
3273 pci_restore_state(pdev);
3274 pci_save_state(pdev);
3275 pci_cleanup_aer_uncorrect_error_status(pdev);
3276
3277 if (t4_wait_dev_ready(adap) < 0)
3278 return PCI_ERS_RESULT_DISCONNECT;
060e0c75 3279 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
204dc3c0
DM
3280 return PCI_ERS_RESULT_DISCONNECT;
3281 adap->flags |= FW_OK;
3282 if (adap_init1(adap, &c))
3283 return PCI_ERS_RESULT_DISCONNECT;
3284
3285 for_each_port(adap, i) {
3286 struct port_info *p = adap2pinfo(adap, i);
3287
060e0c75
DM
3288 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
3289 NULL, NULL);
204dc3c0
DM
3290 if (ret < 0)
3291 return PCI_ERS_RESULT_DISCONNECT;
3292 p->viid = ret;
3293 p->xact_addr_filt = -1;
3294 }
3295
3296 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3297 adap->params.b_wnd);
1ae970e0 3298 setup_memwin(adap);
204dc3c0
DM
3299 if (cxgb_up(adap))
3300 return PCI_ERS_RESULT_DISCONNECT;
3301 return PCI_ERS_RESULT_RECOVERED;
3302}
3303
3304static void eeh_resume(struct pci_dev *pdev)
3305{
3306 int i;
3307 struct adapter *adap = pci_get_drvdata(pdev);
3308
3309 if (!adap)
3310 return;
3311
3312 rtnl_lock();
3313 for_each_port(adap, i) {
3314 struct net_device *dev = adap->port[i];
3315
3316 if (netif_running(dev)) {
3317 link_start(dev);
3318 cxgb_set_rxmode(dev);
3319 }
3320 netif_device_attach(dev);
3321 }
3322 rtnl_unlock();
3323}
3324
3325static struct pci_error_handlers cxgb4_eeh = {
3326 .error_detected = eeh_err_detected,
3327 .slot_reset = eeh_slot_reset,
3328 .resume = eeh_resume,
3329};
3330
b8ff05a9
DM
3331static inline bool is_10g_port(const struct link_config *lc)
3332{
3333 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3334}
3335
3336static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3337 unsigned int size, unsigned int iqe_size)
3338{
3339 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3340 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3341 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3342 q->iqe_len = iqe_size;
3343 q->size = size;
3344}
3345
3346/*
3347 * Perform default configuration of DMA queues depending on the number and type
3348 * of ports we found and the number of available CPUs. Most settings can be
3349 * modified by the admin prior to actual use.
3350 */
3351static void __devinit cfg_queues(struct adapter *adap)
3352{
3353 struct sge *s = &adap->sge;
3354 int i, q10g = 0, n10g = 0, qidx = 0;
3355
3356 for_each_port(adap, i)
3357 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3358
3359 /*
3360 * We default to 1 queue per non-10G port and up to as many queues as
3361 * there are CPU cores per 10G port.
3362 */
3363 if (n10g)
3364 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3365 if (q10g > num_online_cpus())
3366 q10g = num_online_cpus();
3367
3368 for_each_port(adap, i) {
3369 struct port_info *pi = adap2pinfo(adap, i);
3370
3371 pi->first_qset = qidx;
3372 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3373 qidx += pi->nqsets;
3374 }
3375
3376 s->ethqsets = qidx;
3377 s->max_ethqsets = qidx; /* MSI-X may lower it later */
3378
3379 if (is_offload(adap)) {
3380 /*
3381 * For offload we use 1 queue/channel if all ports are at most 1G;
3382 * otherwise we divide the available queues evenly amongst the
3383 * channels, capped by the number of available cores.
3384 */
3385 if (n10g) {
3386 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3387 num_online_cpus());
3388 s->ofldqsets = roundup(i, adap->params.nports);
3389 } else
3390 s->ofldqsets = adap->params.nports;
3391 /* For RDMA one Rx queue per channel suffices */
3392 s->rdmaqs = adap->params.nports;
3393 }
3394
3395 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3396 struct sge_eth_rxq *r = &s->ethrxq[i];
3397
3398 init_rspq(&r->rspq, 0, 0, 1024, 64);
3399 r->fl.size = 72;
3400 }
3401
3402 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3403 s->ethtxq[i].q.size = 1024;
3404
3405 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3406 s->ctrlq[i].q.size = 512;
3407
3408 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3409 s->ofldtxq[i].q.size = 1024;
3410
3411 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3412 struct sge_ofld_rxq *r = &s->ofldrxq[i];
3413
3414 init_rspq(&r->rspq, 0, 0, 1024, 64);
3415 r->rspq.uld = CXGB4_ULD_ISCSI;
3416 r->fl.size = 72;
3417 }
3418
3419 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3420 struct sge_ofld_rxq *r = &s->rdmarxq[i];
3421
3422 init_rspq(&r->rspq, 0, 0, 511, 64);
3423 r->rspq.uld = CXGB4_ULD_RDMA;
3424 r->fl.size = 72;
3425 }
3426
3427 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3428 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3429}
3430
3431/*
3432 * Reduce the number of Ethernet queues across all ports to at most n.
3433 * The caller guarantees n provides at least one queue per port.
3434 */
3435static void __devinit reduce_ethqs(struct adapter *adap, int n)
3436{
3437 int i;
3438 struct port_info *pi;
3439
3440 while (n < adap->sge.ethqsets)
3441 for_each_port(adap, i) {
3442 pi = adap2pinfo(adap, i);
3443 if (pi->nqsets > 1) {
3444 pi->nqsets--;
3445 adap->sge.ethqsets--;
3446 if (adap->sge.ethqsets <= n)
3447 break;
3448 }
3449 }
3450
3451 n = 0;
3452 for_each_port(adap, i) {
3453 pi = adap2pinfo(adap, i);
3454 pi->first_qset = n;
3455 n += pi->nqsets;
3456 }
3457}
3458
3459/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3460#define EXTRA_VECS 2
3461
3462static int __devinit enable_msix(struct adapter *adap)
3463{
3464 int ofld_need = 0;
3465 int i, err, want, need;
3466 struct sge *s = &adap->sge;
3467 unsigned int nchan = adap->params.nports;
3468 struct msix_entry entries[MAX_INGQ + 1];
3469
3470 for (i = 0; i < ARRAY_SIZE(entries); ++i)
3471 entries[i].entry = i;
3472
3473 want = s->max_ethqsets + EXTRA_VECS;
3474 if (is_offload(adap)) {
3475 want += s->rdmaqs + s->ofldqsets;
3476 /* need nchan for each possible ULD */
3477 ofld_need = 2 * nchan;
3478 }
3479 need = adap->params.nports + EXTRA_VECS + ofld_need;
3480
3481 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3482 want = err;
3483
3484 if (!err) {
3485 /*
3486 * Distribute available vectors to the various queue groups.
3487 * Every group gets its minimum requirement and NIC gets top
3488 * priority for leftovers.
3489 */
3490 i = want - EXTRA_VECS - ofld_need;
3491 if (i < s->max_ethqsets) {
3492 s->max_ethqsets = i;
3493 if (i < s->ethqsets)
3494 reduce_ethqs(adap, i);
3495 }
3496 if (is_offload(adap)) {
3497 i = want - EXTRA_VECS - s->max_ethqsets;
3498 i -= ofld_need - nchan;
3499 s->ofldqsets = (i / nchan) * nchan; /* round down */
3500 }
3501 for (i = 0; i < want; ++i)
3502 adap->msix_info[i].vec = entries[i].vector;
3503 } else if (err > 0)
3504 dev_info(adap->pdev_dev,
3505 "only %d MSI-X vectors left, not using MSI-X\n", err);
3506 return err;
3507}
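/*
 * Negotiation sketch: under the old pci_enable_msix() contract (0 on
 * success, > 0 for the number of vectors actually available, < 0 on error)
 * the loop above retries with want = <available> until the allocation
 * either succeeds or fewer than "need" vectors remain, e.g. (counts
 * illustrative):
 *
 *	want = 34 -> returns 20; retry want = 20 -> returns 0 (success)
 */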
3508
3509#undef EXTRA_VECS
3510
671b0060
DM
3511static int __devinit init_rss(struct adapter *adap)
3512{
3513 unsigned int i, j;
3514
3515 for_each_port(adap, i) {
3516 struct port_info *pi = adap2pinfo(adap, i);
3517
3518 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3519 if (!pi->rss)
3520 return -ENOMEM;
3521 for (j = 0; j < pi->rss_size; j++)
278bc429 3522 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
671b0060
DM
3523 }
3524 return 0;
3525}
3526
118969ed 3527static void __devinit print_port_info(const struct net_device *dev)
b8ff05a9
DM
3528{
3529 static const char *base[] = {
a0881cab 3530 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
7d5e77aa 3531 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
b8ff05a9
DM
3532 };
3533
b8ff05a9 3534 char buf[80];
118969ed 3535 char *bufp = buf;
f1a051b9 3536 const char *spd = "";
118969ed
DM
3537 const struct port_info *pi = netdev_priv(dev);
3538 const struct adapter *adap = pi->adapter;
f1a051b9
DM
3539
3540 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3541 spd = " 2.5 GT/s";
3542 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3543 spd = " 5 GT/s";
b8ff05a9 3544
118969ed
DM
3545 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3546 bufp += sprintf(bufp, "100/");
3547 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3548 bufp += sprintf(bufp, "1000/");
3549 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3550 bufp += sprintf(bufp, "10G/");
3551 if (bufp != buf)
3552 --bufp;
3553 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3554
3555 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3556 adap->params.vpd.id, adap->params.rev, buf,
3557 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
3558 (adap->flags & USING_MSIX) ? " MSI-X" :
3559 (adap->flags & USING_MSI) ? " MSI" : "");
3560 netdev_info(dev, "S/N: %s, E/C: %s\n",
3561 adap->params.vpd.sn, adap->params.vpd.ec);
b8ff05a9
DM
3562}
3563
ef306b50
DM
3564static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
3565{
3566 u16 v;
3567 int pos;
3568
3569 pos = pci_pcie_cap(dev);
3570 if (pos > 0) {
3571 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
3572 v |= PCI_EXP_DEVCTL_RELAX_EN;
3573 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
3574 }
3575}
3576
06546391
DM
3577/*
3578 * Free the following resources:
3579 * - memory used for tables
3580 * - MSI/MSI-X
3581 * - net devices
3582 * - resources FW is holding for us
3583 */
3584static void free_some_resources(struct adapter *adapter)
3585{
3586 unsigned int i;
3587
3588 t4_free_mem(adapter->l2t);
3589 t4_free_mem(adapter->tids.tid_tab);
3590 disable_msi(adapter);
3591
3592 for_each_port(adapter, i)
671b0060
DM
3593 if (adapter->port[i]) {
3594 kfree(adap2pinfo(adapter, i)->rss);
06546391 3595 free_netdev(adapter->port[i]);
671b0060 3596 }
06546391 3597 if (adapter->flags & FW_OK)
060e0c75 3598 t4_fw_bye(adapter, adapter->fn);
06546391
DM
3599}
3600
2ed28baa 3601#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 3602#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9
DM
3603 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3604
3605static int __devinit init_one(struct pci_dev *pdev,
3606 const struct pci_device_id *ent)
3607{
3608 int func, i, err;
3609 struct port_info *pi;
c8f44aff 3610 bool highdma = false;
b8ff05a9
DM
3611 struct adapter *adapter = NULL;
3612
3613 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3614
3615 err = pci_request_regions(pdev, KBUILD_MODNAME);
3616 if (err) {
3617 /* Just info, some other driver may have claimed the device. */
3618 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3619 return err;
3620 }
3621
060e0c75 3622 /* We control everything through one PF */
b8ff05a9 3623 func = PCI_FUNC(pdev->devfn);
060e0c75 3624 if (func != ent->driver_data) {
204dc3c0 3625 pci_save_state(pdev); /* to restore SR-IOV later */
b8ff05a9 3626 goto sriov;
204dc3c0 3627 }
b8ff05a9
DM
3628
3629 err = pci_enable_device(pdev);
3630 if (err) {
3631 dev_err(&pdev->dev, "cannot enable PCI device\n");
3632 goto out_release_regions;
3633 }
3634
3635 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 3636 highdma = true;
b8ff05a9
DM
3637 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3638 if (err) {
3639 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3640 "coherent allocations\n");
3641 goto out_disable_device;
3642 }
3643 } else {
3644 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3645 if (err) {
3646 dev_err(&pdev->dev, "no usable DMA configuration\n");
3647 goto out_disable_device;
3648 }
3649 }
3650
3651 pci_enable_pcie_error_reporting(pdev);
ef306b50 3652 enable_pcie_relaxed_ordering(pdev);
b8ff05a9
DM
3653 pci_set_master(pdev);
3654 pci_save_state(pdev);
3655
3656 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3657 if (!adapter) {
3658 err = -ENOMEM;
3659 goto out_disable_device;
3660 }
3661
3662 adapter->regs = pci_ioremap_bar(pdev, 0);
3663 if (!adapter->regs) {
3664 dev_err(&pdev->dev, "cannot map device registers\n");
3665 err = -ENOMEM;
3666 goto out_free_adapter;
3667 }
3668
3669 adapter->pdev = pdev;
3670 adapter->pdev_dev = &pdev->dev;
060e0c75 3671 adapter->fn = func;
b8ff05a9
DM
3672 adapter->msg_enable = dflt_msg_enable;
3673 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3674
3675 spin_lock_init(&adapter->stats_lock);
3676 spin_lock_init(&adapter->tid_release_lock);
3677
3678 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
3679 INIT_WORK(&adapter->db_full_task, process_db_full);
3680 INIT_WORK(&adapter->db_drop_task, process_db_drop);
b8ff05a9
DM
3681
3682 err = t4_prep_adapter(adapter);
3683 if (err)
3684 goto out_unmap_bar;
3685 err = adap_init0(adapter);
3686 if (err)
3687 goto out_unmap_bar;
3688
3689 for_each_port(adapter, i) {
3690 struct net_device *netdev;
3691
3692 netdev = alloc_etherdev_mq(sizeof(struct port_info),
3693 MAX_ETH_QSETS);
3694 if (!netdev) {
3695 err = -ENOMEM;
3696 goto out_free_dev;
3697 }
3698
3699 SET_NETDEV_DEV(netdev, &pdev->dev);
3700
3701 adapter->port[i] = netdev;
3702 pi = netdev_priv(netdev);
3703 pi->adapter = adapter;
3704 pi->xact_addr_filt = -1;
b8ff05a9 3705 pi->port_id = i;
b8ff05a9
DM
3706 netdev->irq = pdev->irq;
3707
2ed28baa
MM
3708 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
3709 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3710 NETIF_F_RXCSUM | NETIF_F_RXHASH |
3711 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
c8f44aff
MM
3712 if (highdma)
3713 netdev->hw_features |= NETIF_F_HIGHDMA;
3714 netdev->features |= netdev->hw_features;
b8ff05a9
DM
3715 netdev->vlan_features = netdev->features & VLAN_FEAT;
3716
01789349
JP
3717 netdev->priv_flags |= IFF_UNICAST_FLT;
3718
b8ff05a9
DM
3719 netdev->netdev_ops = &cxgb4_netdev_ops;
3720 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3721 }
3722
3723 pci_set_drvdata(pdev, adapter);
3724
3725 if (adapter->flags & FW_OK) {
060e0c75 3726 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
3727 if (err)
3728 goto out_free_dev;
3729 }
3730
3731 /*
3732 * Configure queues and allocate tables now, they can be needed as
3733 * soon as the first register_netdev completes.
3734 */
3735 cfg_queues(adapter);
3736
3737 adapter->l2t = t4_init_l2t();
3738 if (!adapter->l2t) {
3739 /* We tolerate a lack of L2T, giving up some functionality */
3740 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
3741 adapter->params.offload = 0;
3742 }
3743
3744 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
3745 dev_warn(&pdev->dev, "could not allocate TID table, "
3746 "continuing\n");
3747 adapter->params.offload = 0;
3748 }
3749
f7cabcdd
DM
3750 /* See what interrupts we'll be using */
3751 if (msi > 1 && enable_msix(adapter) == 0)
3752 adapter->flags |= USING_MSIX;
3753 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3754 adapter->flags |= USING_MSI;
3755
671b0060
DM
3756 err = init_rss(adapter);
3757 if (err)
3758 goto out_free_dev;
3759
b8ff05a9
DM
3760 /*
3761 * The card is now ready to go. If any errors occur during device
3762 * registration we do not fail the whole card but rather proceed only
3763 * with the ports we manage to register successfully. However we must
3764 * register at least one net device.
3765 */
3766 for_each_port(adapter, i) {
a57cabe0
DM
3767 pi = adap2pinfo(adapter, i);
3768 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
3769 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
3770
b8ff05a9
DM
3771 err = register_netdev(adapter->port[i]);
3772 if (err)
b1a3c2b6 3773 break;
b1a3c2b6
DM
3774 adapter->chan_map[pi->tx_chan] = i;
3775 print_port_info(adapter->port[i]);
b8ff05a9 3776 }
b1a3c2b6 3777 if (i == 0) {
b8ff05a9
DM
3778 dev_err(&pdev->dev, "could not register any net devices\n");
3779 goto out_free_dev;
3780 }
b1a3c2b6
DM
3781 if (err) {
3782 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
3783 err = 0;
6403eab1 3784 }
b8ff05a9
DM
3785
3786 if (cxgb4_debugfs_root) {
3787 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
3788 cxgb4_debugfs_root);
3789 setup_debugfs(adapter);
3790 }
3791
6482aa7c
DLR
3792 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3793 pdev->needs_freset = 1;
3794
b8ff05a9
DM
3795 if (is_offload(adapter))
3796 attach_ulds(adapter);
3797
b8ff05a9
DM
3798sriov:
3799#ifdef CONFIG_PCI_IOV
3800 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
3801 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
3802 dev_info(&pdev->dev,
3803 "instantiated %u virtual functions\n",
3804 num_vf[func]);
3805#endif
3806 return 0;
3807
3808 out_free_dev:
06546391 3809 free_some_resources(adapter);
b8ff05a9
DM
3810 out_unmap_bar:
3811 iounmap(adapter->regs);
3812 out_free_adapter:
3813 kfree(adapter);
3814 out_disable_device:
3815 pci_disable_pcie_error_reporting(pdev);
3816 pci_disable_device(pdev);
3817 out_release_regions:
3818 pci_release_regions(pdev);
3819 pci_set_drvdata(pdev, NULL);
3820 return err;
3821}
3822
3823static void __devexit remove_one(struct pci_dev *pdev)
3824{
3825 struct adapter *adapter = pci_get_drvdata(pdev);
3826
3827 pci_disable_sriov(pdev);
3828
3829 if (adapter) {
3830 int i;
3831
3832 if (is_offload(adapter))
3833 detach_ulds(adapter);
3834
3835 for_each_port(adapter, i)
8f3a7676 3836 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
b8ff05a9
DM
3837 unregister_netdev(adapter->port[i]);
3838
3839 if (adapter->debugfs_root)
3840 debugfs_remove_recursive(adapter->debugfs_root);
3841
aaefae9b
DM
3842 if (adapter->flags & FULL_INIT_DONE)
3843 cxgb_down(adapter);
b8ff05a9 3844
06546391 3845 free_some_resources(adapter);
b8ff05a9
DM
3846 iounmap(adapter->regs);
3847 kfree(adapter);
3848 pci_disable_pcie_error_reporting(pdev);
3849 pci_disable_device(pdev);
3850 pci_release_regions(pdev);
3851 pci_set_drvdata(pdev, NULL);
a069ec91 3852 } else
b8ff05a9
DM
3853 pci_release_regions(pdev);
3854}
3855
3856static struct pci_driver cxgb4_driver = {
3857 .name = KBUILD_MODNAME,
3858 .id_table = cxgb4_pci_tbl,
3859 .probe = init_one,
3860 .remove = __devexit_p(remove_one),
204dc3c0 3861 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
3862};
3863
3864static int __init cxgb4_init_module(void)
3865{
3866 int ret;
3867
3868 /* Debugfs support is optional, just warn if this fails */
3869 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3870 if (!cxgb4_debugfs_root)
3871 pr_warning("could not create debugfs entry, continuing\n");
3872
3873 ret = pci_register_driver(&cxgb4_driver);
3874 if (ret < 0)
3875 debugfs_remove(cxgb4_debugfs_root);
3876 return ret;
3877}
3878
3879static void __exit cxgb4_cleanup_module(void)
3880{
3881 pci_unregister_driver(&cxgb4_driver);
3882 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3883}
3884
3885module_init(cxgb4_init_module);
3886module_exit(cxgb4_cleanup_module);