/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
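
/* For illustration (hypothetical device ID, not part of this file): with the
 * macros above, an entry such as CH_PCI_ID_TABLE_ENTRY(0x4001) inside
 * t4_pci_id_tbl.h expands to
 *
 *	{PCI_VDEVICE(CHELSIO, (0x4001)), 4}
 *
 * so the shared ID header emits one pci_device_id row per device, bracketed
 * by the CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN/_END expansions.
 */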

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 *     msi = 2: choose from among all three options
 *     msi = 1: only consider MSI and INTx interrupts
 *     msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
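
/* Illustrative usage (not part of the driver source): because "msi" is
 * registered read-write (0644) above, it can be set at module load time and
 * inspected via sysfs afterwards; the value only matters when a device is
 * probed:
 *
 *	modprobe cxgb4 msi=1
 *	cat /sys/module/cxgb4/parameters/msi
 */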

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
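
/* For illustration (standard NET_IP_ALIGN-style reasoning, not driver code):
 * an Ethernet header is 14 bytes, so a 2-byte pad in front of it places the
 * IP header at a 4-byte-aligned buffer offset:
 *
 *	offset 0:  2-byte pad
 *	offset 2:  14-byte Ethernet header
 *	offset 16: IP header (16 % 4 == 0)
 */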

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel-provided selection function
 * (select_queue=0) or the driver's cxgb_select_queue() function
 * (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* If a non-zero hash was returned, the address was hashed rather
	 * than given an exact filter, so add it to the hash-address list;
	 * the hash vector for the whole list is then recalculated and
	 * programmed.
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd &
					FW_PORT_CMD_DCBXDIS_F)
				   : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
					FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}

static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;	/* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err)
		t4_free_sge_resources(adap);
	return err;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	t4_free_sge_resources(adap);
	return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

995 | ||
b8ff05a9 | 996 | /** |
812034f1 | 997 | * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters |
b8ff05a9 DM |
998 | * @q: the Rx queue |
999 | * @us: the hold-off time in us, or 0 to disable timer | |
1000 | * @cnt: the hold-off packet count, or 0 to disable counter | |
1001 | * | |
1002 | * Sets an Rx queue's interrupt hold-off time and packet count. At least | |
1003 | * one of the two needs to be enabled for the queue to generate interrupts. | |
1004 | */ | |
812034f1 HS |
1005 | int cxgb4_set_rspq_intr_params(struct sge_rspq *q, |
1006 | unsigned int us, unsigned int cnt) | |
b8ff05a9 | 1007 | { |
c887ad0e HS |
1008 | struct adapter *adap = q->adap; |
1009 | ||
b8ff05a9 DM |
1010 | if ((us | cnt) == 0) |
1011 | cnt = 1; | |
1012 | ||
1013 | if (cnt) { | |
1014 | int err; | |
1015 | u32 v, new_idx; | |
1016 | ||
1017 | new_idx = closest_thres(&adap->sge, cnt); | |
1018 | if (q->desc && q->pktcnt_idx != new_idx) { | |
1019 | /* the queue has already been created, update it */ | |
5167865a HS |
1020 | v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | |
1021 | FW_PARAMS_PARAM_X_V( | |
1022 | FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | | |
1023 | FW_PARAMS_PARAM_YZ_V(q->cntxt_id); | |
b2612722 HS |
1024 | err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, |
1025 | &v, &new_idx); | |
b8ff05a9 DM |
1026 | if (err) |
1027 | return err; | |
1028 | } | |
1029 | q->pktcnt_idx = new_idx; | |
1030 | } | |
1031 | ||
1032 | us = us == 0 ? 6 : closest_timer(&adap->sge, us); | |
1ecc7b7a | 1033 | q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0); |
b8ff05a9 DM |
1034 | return 0; |
1035 | } | |
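
/* Illustrative usage (not taken from this file): ask for roughly a 5us
 * hold-off timer and an 8-packet counter on an Rx queue; both values are
 * snapped to the nearest entries of the adapter's timer_val[]/counter_val[]
 * tables by closest_timer()/closest_thres() above:
 *
 *	err = cxgb4_set_rspq_intr_params(&q->rspq, 5, 8);
 */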

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM.
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}

	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;

		p = (void *)p - chan;
		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

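/* For illustration (commentary on the two functions above, not driver code):
 * the release list links tid_tab[] slots through the slots themselves and
 * tags each link pointer with the Tx channel in its low 2 bits, which are
 * free because the slots are pointer-aligned:
 *
 *	head = (void **)((uintptr_t)p | chan);	// encode, chan in 0..3
 *	chan = (uintptr_t)head & 3;		// decode the channel
 *	p    = (void *)head - chan;		// decode the slot address
 */
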
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

b8ff05a9 DM |
1480 | /** |
1481 | * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU | |
1482 | * @mtus: the HW MTU table | |
1483 | * @mtu: the target MTU | |
1484 | * @idx: index of selected entry in the MTU table | |
1485 | * | |
1486 | * Returns the index and the value in the HW MTU table that is closest to | |
1487 | * but does not exceed @mtu, unless @mtu is smaller than any value in the | |
1488 | * table, in which case that smallest available value is selected. | |
1489 | */ | |
1490 | unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, | |
1491 | unsigned int *idx) | |
1492 | { | |
1493 | unsigned int i = 0; | |
1494 | ||
1495 | while (i < NMTUS - 1 && mtus[i + 1] <= mtu) | |
1496 | ++i; | |
1497 | if (idx) | |
1498 | *idx = i; | |
1499 | return mtus[i]; | |
1500 | } | |
1501 | EXPORT_SYMBOL(cxgb4_best_mtu); | |
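| /* Example (hypothetical 3-entry table): with mtus[] = { 576, 1500, 9000 } | |
| * and mtu = 8000, cxgb4_best_mtu() returns 1500 and sets *idx = 1, since | |
| * 9000 would exceed the target; with mtu = 500 it returns the smallest | |
| * entry, 576. | |
| */ | |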
1502 | ||
92e7ae71 HS |
1503 | /** |
1504 | * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned | |
1505 | * @mtus: the HW MTU table | |
1506 | * @header_size: Header Size | |
1507 | * @data_size_max: maximum Data Segment Size | |
1508 | * @data_size_align: desired Data Segment Size Alignment (2^N) | |
1509 | * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL) | |
1510 | * | |
1511 | * Similar to cxgb4_best_mtu() but instead of searching the Hardware | |
1512 | * MTU Table based solely on a Maximum MTU parameter, we break that | |
1513 | * parameter up into a Header Size and Maximum Data Segment Size, and | |
1514 | * provide a desired Data Segment Size Alignment. If we find an MTU in | |
1515 | * the Hardware MTU Table which will result in a Data Segment Size with | |
1516 | * the requested alignment _and_ that MTU isn't "too far" from the | |
1517 | * closest MTU, then we'll return that rather than the closest MTU. | |
1518 | */ | |
1519 | unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus, | |
1520 | unsigned short header_size, | |
1521 | unsigned short data_size_max, | |
1522 | unsigned short data_size_align, | |
1523 | unsigned int *mtu_idxp) | |
1524 | { | |
1525 | unsigned short max_mtu = header_size + data_size_max; | |
1526 | unsigned short data_size_align_mask = data_size_align - 1; | |
1527 | int mtu_idx, aligned_mtu_idx; | |
1528 | ||
1529 | /* Scan the MTU Table till we find an MTU which is larger than our | |
1530 | * Maximum MTU or we reach the end of the table. Along the way, | |
1531 | * record the last MTU found, if any, which will result in a Data | |
1532 | * Segment Length matching the requested alignment. | |
1533 | */ | |
1534 | for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { | |
1535 | unsigned short data_size = mtus[mtu_idx] - header_size; | |
1536 | ||
1537 | /* If this MTU minus the Header Size would result in a | |
1538 | * Data Segment Size of the desired alignment, remember it. | |
1539 | */ | |
1540 | if ((data_size & data_size_align_mask) == 0) | |
1541 | aligned_mtu_idx = mtu_idx; | |
1542 | ||
1543 | /* If we're not at the end of the Hardware MTU Table and the | |
1544 | * next element is larger than our Maximum MTU, drop out of | |
1545 | * the loop. | |
1546 | */ | |
1547 | if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu) | |
1548 | break; | |
1549 | } | |
1550 | ||
1551 | /* If we fell out of the loop because we ran to the end of the table, | |
1552 | * then we just have to use the last [largest] entry. | |
1553 | */ | |
1554 | if (mtu_idx == NMTUS) | |
1555 | mtu_idx--; | |
1556 | ||
1557 | /* If we found an MTU which resulted in the requested Data Segment | |
1558 | * Length alignment and that's "not far" from the largest MTU which is | |
1559 | * less than or equal to the maximum MTU, then use that. | |
1560 | */ | |
1561 | if (aligned_mtu_idx >= 0 && | |
1562 | mtu_idx - aligned_mtu_idx <= 1) | |
1563 | mtu_idx = aligned_mtu_idx; | |
1564 | ||
1565 | /* If the caller has passed in an MTU Index pointer, pass the | |
1566 | * MTU Index back. Return the MTU value. | |
1567 | */ | |
1568 | if (mtu_idxp) | |
1569 | *mtu_idxp = mtu_idx; | |
1570 | return mtus[mtu_idx]; | |
1571 | } | |
1572 | EXPORT_SYMBOL(cxgb4_best_aligned_mtu); | |
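| /* Example (hypothetical values): with header_size = 40, data_size_max = | |
| * 1460 (max_mtu = 1500), data_size_align = 512, and a table containing | |
| * { ..., 1064, 1500, ... }, the closest MTU is 1500 but 1064 yields a | |
| * 1024-byte (512-aligned) data segment and is only one table slot away, | |
| * so 1064 is returned instead. | |
| */ | |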
1573 | ||
27999805 H |
1574 | /** |
1575 | * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI | |
1576 | * @chip: chip type | |
1577 | * @viid: VI id of the given port | |
1578 | * | |
1579 | * Return the SMT index for this VI. | |
1580 | */ | |
1581 | unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid) | |
1582 | { | |
1583 | /* In T4/T5, SMT contains 256 SMAC entries organized in | |
1584 | * 128 rows of 2 entries each. | |
1585 | * In T6, SMT contains 256 SMAC entries in 256 rows. | |
1586 | * TODO: The below code needs to be updated when we add support | |
1587 | * for 256 VFs. | |
1588 | */ | |
1589 | if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) | |
1590 | return ((viid & 0x7f) << 1); | |
1591 | else | |
1592 | return (viid & 0x7f); | |
1593 | } | |
1594 | EXPORT_SYMBOL(cxgb4_tp_smt_idx); | |
1595 | ||
b8ff05a9 DM |
1596 | /** |
1597 | * cxgb4_port_chan - get the HW channel of a port | |
1598 | * @dev: the net device for the port | |
1599 | * | |
1600 | * Return the HW Tx channel of the given port. | |
1601 | */ | |
1602 | unsigned int cxgb4_port_chan(const struct net_device *dev) | |
1603 | { | |
1604 | return netdev2pinfo(dev)->tx_chan; | |
1605 | } | |
1606 | EXPORT_SYMBOL(cxgb4_port_chan); | |
1607 | ||
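| /* cxgb4_dbfifo_count - read the occupancy of an SGE doorbell FIFO | |
| * @dev: any port's net device on the adapter | |
| * @lpfifo: nonzero for the low-priority FIFO, 0 for the high-priority one | |
| * | |
| * Returns the current fill level of the selected doorbell FIFO. | |
| */ | |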
881806bc VP |
1608 | unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) |
1609 | { | |
1610 | struct adapter *adap = netdev2adap(dev); | |
2cc301d2 | 1611 | u32 v1, v2, lp_count, hp_count; |
881806bc | 1612 | |
f061de42 HS |
1613 | v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); |
1614 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); | |
d14807dd | 1615 | if (is_t4(adap->params.chip)) { |
f061de42 HS |
1616 | lp_count = LP_COUNT_G(v1); |
1617 | hp_count = HP_COUNT_G(v1); | |
2cc301d2 | 1618 | } else { |
f061de42 HS |
1619 | lp_count = LP_COUNT_T5_G(v1); |
1620 | hp_count = HP_COUNT_T5_G(v2); | |
2cc301d2 SR |
1621 | } |
1622 | return lpfifo ? lp_count : hp_count; | |
881806bc VP |
1623 | } |
1624 | EXPORT_SYMBOL(cxgb4_dbfifo_count); | |
1625 | ||
b8ff05a9 DM |
1626 | /** |
1627 | * cxgb4_port_viid - get the VI id of a port | |
1628 | * @dev: the net device for the port | |
1629 | * | |
1630 | * Return the VI id of the given port. | |
1631 | */ | |
1632 | unsigned int cxgb4_port_viid(const struct net_device *dev) | |
1633 | { | |
1634 | return netdev2pinfo(dev)->viid; | |
1635 | } | |
1636 | EXPORT_SYMBOL(cxgb4_port_viid); | |
1637 | ||
1638 | /** | |
1639 | * cxgb4_port_idx - get the index of a port | |
1640 | * @dev: the net device for the port | |
1641 | * | |
1642 | * Return the index of the given port. | |
1643 | */ | |
1644 | unsigned int cxgb4_port_idx(const struct net_device *dev) | |
1645 | { | |
1646 | return netdev2pinfo(dev)->port_id; | |
1647 | } | |
1648 | EXPORT_SYMBOL(cxgb4_port_idx); | |
1649 | ||
b8ff05a9 DM |
1650 | void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, |
1651 | struct tp_tcp_stats *v6) | |
1652 | { | |
1653 | struct adapter *adap = pci_get_drvdata(pdev); | |
1654 | ||
1655 | spin_lock(&adap->stats_lock); | |
5ccf9d04 | 1656 | t4_tp_get_tcp_stats(adap, v4, v6, false); |
b8ff05a9 DM |
1657 | spin_unlock(&adap->stats_lock); |
1658 | } | |
1659 | EXPORT_SYMBOL(cxgb4_get_tcp_stats); | |
1660 | ||
1661 | void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, | |
1662 | const unsigned int *pgsz_order) | |
1663 | { | |
1664 | struct adapter *adap = netdev2adap(dev); | |
1665 | ||
0d804338 HS |
1666 | t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); |
1667 | t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | | |
1668 | HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) | | |
1669 | HPZ3_V(pgsz_order[3])); | |
b8ff05a9 DM |
1670 | } |
1671 | EXPORT_SYMBOL(cxgb4_iscsi_init); | |
1672 | ||
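| /* cxgb4_flush_eq_cache - flush the on-chip SGE egress context cache | |
| * @dev: any port's net device on the adapter | |
| * | |
| * Returns 0 on success or a negative errno. | |
| */ | |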
3069ee9b VP |
1673 | int cxgb4_flush_eq_cache(struct net_device *dev) |
1674 | { | |
1675 | struct adapter *adap = netdev2adap(dev); | |
3069ee9b | 1676 | |
736c3b94 | 1677 | return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); |
3069ee9b VP |
1678 | } |
1679 | EXPORT_SYMBOL(cxgb4_flush_eq_cache); | |
1680 | ||
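| /* Read an egress queue's hardware producer/consumer indices from its | |
| * context image in EDC0. | |
| */ | |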
1681 | static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) | |
1682 | { | |
f061de42 | 1683 | u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; |
3069ee9b VP |
1684 | __be64 indices; |
1685 | int ret; | |
1686 | ||
fc5ab020 HS |
1687 | spin_lock(&adap->win0_lock); |
1688 | ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, | |
1689 | sizeof(indices), (__be32 *)&indices, | |
1690 | T4_MEMORY_READ); | |
1691 | spin_unlock(&adap->win0_lock); | |
3069ee9b | 1692 | if (!ret) { |
404d9e3f VP |
1693 | *cidx = (be64_to_cpu(indices) >> 25) & 0xffff; |
1694 | *pidx = (be64_to_cpu(indices) >> 9) & 0xffff; | |
3069ee9b VP |
1695 | } |
1696 | return ret; | |
1697 | } | |
1698 | ||
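| /* cxgb4_sync_txq_pidx - resync a TX queue's doorbell with hardware | |
| * @dev: the net device | |
| * @qid: the egress queue's context id | |
| * @pidx: the producer index the queue should be at | |
| * @size: number of descriptors in the queue | |
| * | |
| * If the hardware producer index has fallen behind @pidx (e.g. after a | |
| * dropped doorbell), ring the doorbell with the missing increment. | |
| */ | |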
1699 | int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, | |
1700 | u16 size) | |
1701 | { | |
1702 | struct adapter *adap = netdev2adap(dev); | |
1703 | u16 hw_pidx, hw_cidx; | |
1704 | int ret; | |
1705 | ||
1706 | ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); | |
1707 | if (ret) | |
1708 | goto out; | |
1709 | ||
1710 | if (pidx != hw_pidx) { | |
1711 | u16 delta; | |
f612b815 | 1712 | u32 val; |
3069ee9b VP |
1713 | |
1714 | if (pidx >= hw_pidx) | |
1715 | delta = pidx - hw_pidx; | |
1716 | else | |
1717 | delta = size - hw_pidx + pidx; | |
f612b815 HS |
1718 | |
1719 | if (is_t4(adap->params.chip)) | |
1720 | val = PIDX_V(delta); | |
1721 | else | |
1722 | val = PIDX_T5_V(delta); | |
3069ee9b | 1723 | wmb(); |
f612b815 HS |
1724 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), |
1725 | QID_V(qid) | val); | |
3069ee9b VP |
1726 | } |
1727 | out: | |
1728 | return ret; | |
1729 | } | |
1730 | EXPORT_SYMBOL(cxgb4_sync_txq_pidx); | |
1731 | ||
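| /* cxgb4_read_tpte - read the TPTE for a stag from adapter memory | |
| * @dev: the net device | |
| * @stag: the stag whose TPTE should be read | |
| * @tpte: buffer receiving the 32-byte entry | |
| * | |
| * Translates @stag into an offset within the adapter's memory map | |
| * (EDC0/EDC1/MC0/MC1) and reads the entry through memory window 0. | |
| */ | |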
031cf476 HS |
1732 | int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) |
1733 | { | |
1734 | struct adapter *adap; | |
1735 | u32 offset, memtype, memaddr; | |
6559a7e8 | 1736 | u32 edc0_size, edc1_size, mc0_size, mc1_size, size; |
031cf476 HS |
1737 | u32 edc0_end, edc1_end, mc0_end, mc1_end; |
1738 | int ret; | |
1739 | ||
1740 | adap = netdev2adap(dev); | |
1741 | ||
1742 | offset = ((stag >> 8) * 32) + adap->vres.stag.start; | |
1743 | ||
1744 | /* Figure out where the offset lands in the Memory Type/Address scheme. | |
1745 | * This code assumes that the memory is laid out starting at offset 0 | |
1746 | * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0 | |
1747 | * and EDC1. Some cards will have neither MC0 nor MC1, most cards have | |
1748 | * MC0, and some have both MC0 and MC1. | |
1749 | */ | |
6559a7e8 HS |
1750 | size = t4_read_reg(adap, MA_EDRAM0_BAR_A); |
1751 | edc0_size = EDRAM0_SIZE_G(size) << 20; | |
1752 | size = t4_read_reg(adap, MA_EDRAM1_BAR_A); | |
1753 | edc1_size = EDRAM1_SIZE_G(size) << 20; | |
1754 | size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); | |
1755 | mc0_size = EXT_MEM0_SIZE_G(size) << 20; | |
031cf476 HS |
1756 | |
1757 | edc0_end = edc0_size; | |
1758 | edc1_end = edc0_end + edc1_size; | |
1759 | mc0_end = edc1_end + mc0_size; | |
1760 | ||
1761 | if (offset < edc0_end) { | |
1762 | memtype = MEM_EDC0; | |
1763 | memaddr = offset; | |
1764 | } else if (offset < edc1_end) { | |
1765 | memtype = MEM_EDC1; | |
1766 | memaddr = offset - edc0_end; | |
1767 | } else { | |
1768 | if (offset < mc0_end) { | |
1769 | memtype = MEM_MC0; | |
1770 | memaddr = offset - edc1_end; | |
3ccc6cf7 | 1771 | } else if (is_t5(adap->params.chip)) { |
6559a7e8 HS |
1772 | size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); |
1773 | mc1_size = EXT_MEM1_SIZE_G(size) << 20; | |
031cf476 HS |
1774 | mc1_end = mc0_end + mc1_size; |
1775 | if (offset < mc1_end) { | |
1776 | memtype = MEM_MC1; | |
1777 | memaddr = offset - mc0_end; | |
1778 | } else { | |
1779 | /* offset beyond the end of any memory */ | |
1780 | goto err; | |
1781 | } | |
3ccc6cf7 HS |
1782 | } else { |
1783 | /* T4/T6 only have a single memory channel */ | |
1784 | goto err; | |
031cf476 HS |
1785 | } |
1786 | } | |
1787 | ||
1788 | spin_lock(&adap->win0_lock); | |
1789 | ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); | |
1790 | spin_unlock(&adap->win0_lock); | |
1791 | return ret; | |
1792 | ||
1793 | err: | |
1794 | dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", | |
1795 | stag, offset); | |
1796 | return -EINVAL; | |
1797 | } | |
1798 | EXPORT_SYMBOL(cxgb4_read_tpte); | |
1799 | ||
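| /* cxgb4_read_sge_timestamp - read the current 64-bit SGE timestamp | |
| * @dev: the net device | |
| * | |
| * Combines the SGE timestamp low/high register pair into one value. | |
| */ | |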
7730b4c7 HS |
1800 | u64 cxgb4_read_sge_timestamp(struct net_device *dev) |
1801 | { | |
1802 | u32 hi, lo; | |
1803 | struct adapter *adap; | |
1804 | ||
1805 | adap = netdev2adap(dev); | |
f612b815 HS |
1806 | lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A); |
1807 | hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A)); | |
7730b4c7 HS |
1808 | |
1809 | return ((u64)hi << 32) | (u64)lo; | |
1810 | } | |
1811 | EXPORT_SYMBOL(cxgb4_read_sge_timestamp); | |
1812 | ||
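| /* cxgb4_bar2_sge_qregs - return the BAR2 location of a queue's registers | |
| * @dev: the net device | |
| * @qid: the queue id | |
| * @qtype: egress or ingress queue | |
| * @user: nonzero if the region will be mapped into a user process | |
| * @pbar2_qoffset: returns the BAR2 offset of the queue's registers | |
| * @pbar2_qid: returns the queue id to use in doorbell writes | |
| */ | |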
df64e4d3 HS |
1813 | int cxgb4_bar2_sge_qregs(struct net_device *dev, |
1814 | unsigned int qid, | |
1815 | enum cxgb4_bar2_qtype qtype, | |
66cf188e | 1816 | int user, |
df64e4d3 HS |
1817 | u64 *pbar2_qoffset, |
1818 | unsigned int *pbar2_qid) | |
1819 | { | |
b2612722 | 1820 | return t4_bar2_sge_qregs(netdev2adap(dev), |
df64e4d3 HS |
1821 | qid, |
1822 | (qtype == CXGB4_BAR2_QTYPE_EGRESS | |
1823 | ? T4_BAR2_QTYPE_EGRESS | |
1824 | : T4_BAR2_QTYPE_INGRESS), | |
66cf188e | 1825 | user, |
df64e4d3 HS |
1826 | pbar2_qoffset, |
1827 | pbar2_qid); | |
1828 | } | |
1829 | EXPORT_SYMBOL(cxgb4_bar2_sge_qregs); | |
1830 | ||
b8ff05a9 DM |
1831 | static struct pci_driver cxgb4_driver; |
1832 | ||
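| /* Propagate a neighbour update into the L2T if the (possibly | |
| * VLAN-encapsulated) device belongs to this driver. | |
| */ | |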
1833 | static void check_neigh_update(struct neighbour *neigh) | |
1834 | { | |
1835 | const struct device *parent; | |
1836 | const struct net_device *netdev = neigh->dev; | |
1837 | ||
d0d7b10b | 1838 | if (is_vlan_dev(netdev)) |
b8ff05a9 DM |
1839 | netdev = vlan_dev_real_dev(netdev); |
1840 | parent = netdev->dev.parent; | |
1841 | if (parent && parent->driver == &cxgb4_driver.driver) | |
1842 | t4_l2t_update(dev_get_drvdata(parent), neigh); | |
1843 | } | |
1844 | ||
1845 | static int netevent_cb(struct notifier_block *nb, unsigned long event, | |
1846 | void *data) | |
1847 | { | |
1848 | switch (event) { | |
1849 | case NETEVENT_NEIGH_UPDATE: | |
1850 | check_neigh_update(data); | |
1851 | break; | |
b8ff05a9 DM |
1852 | case NETEVENT_REDIRECT: |
1853 | default: | |
1854 | break; | |
1855 | } | |
1856 | return 0; | |
1857 | } | |
1858 | ||
1859 | static bool netevent_registered; | |
1860 | static struct notifier_block cxgb4_netevent_nb = { | |
1861 | .notifier_call = netevent_cb | |
1862 | }; | |
1863 | ||
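| /* Sleep in @usecs increments until both doorbell FIFOs are empty. */ | |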
3069ee9b VP |
1864 | static void drain_db_fifo(struct adapter *adap, int usecs) |
1865 | { | |
2cc301d2 | 1866 | u32 v1, v2, lp_count, hp_count; |
3069ee9b VP |
1867 | |
1868 | do { | |
f061de42 HS |
1869 | v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); |
1870 | v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); | |
d14807dd | 1871 | if (is_t4(adap->params.chip)) { |
f061de42 HS |
1872 | lp_count = LP_COUNT_G(v1); |
1873 | hp_count = HP_COUNT_G(v1); | |
2cc301d2 | 1874 | } else { |
f061de42 HS |
1875 | lp_count = LP_COUNT_T5_G(v1); |
1876 | hp_count = HP_COUNT_T5_G(v2); | |
2cc301d2 SR |
1877 | } |
1878 | ||
1879 | if (lp_count == 0 && hp_count == 0) | |
1880 | break; | |
3069ee9b VP |
1881 | set_current_state(TASK_UNINTERRUPTIBLE); |
1882 | schedule_timeout(usecs_to_jiffies(usecs)); | |
3069ee9b VP |
1883 | } while (1); |
1884 | } | |
1885 | ||
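| /* Doorbell disable/enable helpers used during doorbell FIFO overflow | |
| * recovery.  While a queue's doorbell is disabled, producer-index | |
| * increments accumulate in db_pidx_inc and are replayed when the | |
| * doorbell is re-enabled. | |
| */ | |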
1886 | static void disable_txq_db(struct sge_txq *q) | |
1887 | { | |
05eb2389 SW |
1888 | unsigned long flags; |
1889 | ||
1890 | spin_lock_irqsave(&q->db_lock, flags); | |
3069ee9b | 1891 | q->db_disabled = 1; |
05eb2389 | 1892 | spin_unlock_irqrestore(&q->db_lock, flags); |
3069ee9b VP |
1893 | } |
1894 | ||
05eb2389 | 1895 | static void enable_txq_db(struct adapter *adap, struct sge_txq *q) |
3069ee9b VP |
1896 | { |
1897 | spin_lock_irq(&q->db_lock); | |
05eb2389 SW |
1898 | if (q->db_pidx_inc) { |
1899 | /* Make sure that all writes to the TX descriptors | |
1900 | * are committed before we tell HW about them. | |
1901 | */ | |
1902 | wmb(); | |
f612b815 HS |
1903 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), |
1904 | QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc)); | |
05eb2389 SW |
1905 | q->db_pidx_inc = 0; |
1906 | } | |
3069ee9b VP |
1907 | q->db_disabled = 0; |
1908 | spin_unlock_irq(&q->db_lock); | |
1909 | } | |
1910 | ||
1911 | static void disable_dbs(struct adapter *adap) | |
1912 | { | |
1913 | int i; | |
1914 | ||
1915 | for_each_ethrxq(&adap->sge, i) | |
1916 | disable_txq_db(&adap->sge.ethtxq[i].q); | |
ab677ff4 HS |
1917 | if (is_offload(adap)) { |
1918 | struct sge_uld_txq_info *txq_info = | |
1919 | adap->sge.uld_txq_info[CXGB4_TX_OFLD]; | |
1920 | ||
1921 | if (txq_info) { | |
1922 | for_each_ofldtxq(&adap->sge, i) { | |
1923 | struct sge_uld_txq *txq = &txq_info->uldtxq[i]; | |
1924 | ||
1925 | disable_txq_db(&txq->q); | |
1926 | } | |
1927 | } | |
1928 | } | |
3069ee9b VP |
1929 | for_each_port(adap, i) |
1930 | disable_txq_db(&adap->sge.ctrlq[i].q); | |
1931 | } | |
1932 | ||
1933 | static void enable_dbs(struct adapter *adap) | |
1934 | { | |
1935 | int i; | |
1936 | ||
1937 | for_each_ethrxq(&adap->sge, i) | |
05eb2389 | 1938 | enable_txq_db(adap, &adap->sge.ethtxq[i].q); |
ab677ff4 HS |
1939 | if (is_offload(adap)) { |
1940 | struct sge_uld_txq_info *txq_info = | |
1941 | adap->sge.uld_txq_info[CXGB4_TX_OFLD]; | |
1942 | ||
1943 | if (txq_info) { | |
1944 | for_each_ofldtxq(&adap->sge, i) { | |
1945 | struct sge_uld_txq *txq = &txq_info->uldtxq[i]; | |
1946 | ||
1947 | enable_txq_db(adap, &txq->q); | |
1948 | } | |
1949 | } | |
1950 | } | |
3069ee9b | 1951 | for_each_port(adap, i) |
05eb2389 SW |
1952 | enable_txq_db(adap, &adap->sge.ctrlq[i].q); |
1953 | } | |
1954 | ||
1955 | static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) | |
1956 | { | |
0fbc81b3 HS |
1957 | enum cxgb4_uld type = CXGB4_ULD_RDMA; |
1958 | ||
1959 | if (adap->uld && adap->uld[type].handle) | |
1960 | adap->uld[type].control(adap->uld[type].handle, cmd); | |
05eb2389 SW |
1961 | } |
1962 | ||
1963 | static void process_db_full(struct work_struct *work) | |
1964 | { | |
1965 | struct adapter *adap; | |
1966 | ||
1967 | adap = container_of(work, struct adapter, db_full_task); | |
1968 | ||
1969 | drain_db_fifo(adap, dbfifo_drain_delay); | |
1970 | enable_dbs(adap); | |
1971 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); | |
3ccc6cf7 HS |
1972 | if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) |
1973 | t4_set_reg_field(adap, SGE_INT_ENABLE3_A, | |
1974 | DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, | |
1975 | DBFIFO_HP_INT_F | DBFIFO_LP_INT_F); | |
1976 | else | |
1977 | t4_set_reg_field(adap, SGE_INT_ENABLE3_A, | |
1978 | DBFIFO_LP_INT_F, DBFIFO_LP_INT_F); | |
3069ee9b VP |
1979 | } |
1980 | ||
1981 | static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) | |
1982 | { | |
1983 | u16 hw_pidx, hw_cidx; | |
1984 | int ret; | |
1985 | ||
05eb2389 | 1986 | spin_lock_irq(&q->db_lock); |
3069ee9b VP |
1987 | ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); |
1988 | if (ret) | |
1989 | goto out; | |
1990 | if (q->db_pidx != hw_pidx) { | |
1991 | u16 delta; | |
f612b815 | 1992 | u32 val; |
3069ee9b VP |
1993 | |
1994 | if (q->db_pidx >= hw_pidx) | |
1995 | delta = q->db_pidx - hw_pidx; | |
1996 | else | |
1997 | delta = q->size - hw_pidx + q->db_pidx; | |
f612b815 HS |
1998 | |
1999 | if (is_t4(adap->params.chip)) | |
2000 | val = PIDX_V(delta); | |
2001 | else | |
2002 | val = PIDX_T5_V(delta); | |
3069ee9b | 2003 | wmb(); |
f612b815 HS |
2004 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), |
2005 | QID_V(q->cntxt_id) | val); | |
3069ee9b VP |
2006 | } |
2007 | out: | |
2008 | q->db_disabled = 0; | |
05eb2389 SW |
2009 | q->db_pidx_inc = 0; |
2010 | spin_unlock_irq(&q->db_lock); | |
3069ee9b VP |
2011 | if (ret) |
2012 | CH_WARN(adap, "DB drop recovery failed.\n"); | |
2013 | } | |
0fbc81b3 | 2014 | |
3069ee9b VP |
2015 | static void recover_all_queues(struct adapter *adap) |
2016 | { | |
2017 | int i; | |
2018 | ||
2019 | for_each_ethrxq(&adap->sge, i) | |
2020 | sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); | |
ab677ff4 HS |
2021 | if (is_offload(adap)) { |
2022 | struct sge_uld_txq_info *txq_info = | |
2023 | adap->sge.uld_txq_info[CXGB4_TX_OFLD]; | |
2024 | if (txq_info) { | |
2025 | for_each_ofldtxq(&adap->sge, i) { | |
2026 | struct sge_uld_txq *txq = &txq_info->uldtxq[i]; | |
2027 | ||
2028 | sync_txq_pidx(adap, &txq->q); | |
2029 | } | |
2030 | } | |
2031 | } | |
3069ee9b VP |
2032 | for_each_port(adap, i) |
2033 | sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); | |
2034 | } | |
2035 | ||
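| /* Recover from a dropped doorbell: on T4, drain the FIFOs and resync | |
| * every TX queue; on T5, re-ring the dropped doorbell through the | |
| * queue's BAR2 user doorbell and re-enable BAR2 write combining. | |
| */ | |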
881806bc VP |
2036 | static void process_db_drop(struct work_struct *work) |
2037 | { | |
2038 | struct adapter *adap; | |
881806bc | 2039 | |
3069ee9b | 2040 | adap = container_of(work, struct adapter, db_drop_task); |
881806bc | 2041 | |
d14807dd | 2042 | if (is_t4(adap->params.chip)) { |
05eb2389 | 2043 | drain_db_fifo(adap, dbfifo_drain_delay); |
2cc301d2 | 2044 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); |
05eb2389 | 2045 | drain_db_fifo(adap, dbfifo_drain_delay); |
2cc301d2 | 2046 | recover_all_queues(adap); |
05eb2389 | 2047 | drain_db_fifo(adap, dbfifo_drain_delay); |
2cc301d2 | 2048 | enable_dbs(adap); |
05eb2389 | 2049 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); |
3ccc6cf7 | 2050 | } else if (is_t5(adap->params.chip)) { |
2cc301d2 SR |
2051 | u32 dropped_db = t4_read_reg(adap, 0x010ac); |
2052 | u16 qid = (dropped_db >> 15) & 0x1ffff; | |
2053 | u16 pidx_inc = dropped_db & 0x1fff; | |
df64e4d3 HS |
2054 | u64 bar2_qoffset; |
2055 | unsigned int bar2_qid; | |
2056 | int ret; | |
2cc301d2 | 2057 | |
b2612722 | 2058 | ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, |
e0456717 | 2059 | 0, &bar2_qoffset, &bar2_qid); |
df64e4d3 HS |
2060 | if (ret) |
2061 | dev_err(adap->pdev_dev, "doorbell drop recovery: qid=%d, pidx_inc=%d\n", | |
2062 | qid, pidx_inc); | |
2063 | else | |
f612b815 | 2064 | writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid), |
df64e4d3 | 2065 | adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); |
2cc301d2 SR |
2066 | |
2067 | /* Re-enable BAR2 WC */ | |
2068 | t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); | |
2069 | } | |
2070 | ||
3ccc6cf7 HS |
2071 | if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) |
2072 | t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); | |
881806bc VP |
2073 | } |
2074 | ||
2075 | void t4_db_full(struct adapter *adap) | |
2076 | { | |
d14807dd | 2077 | if (is_t4(adap->params.chip)) { |
05eb2389 SW |
2078 | disable_dbs(adap); |
2079 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); | |
f612b815 HS |
2080 | t4_set_reg_field(adap, SGE_INT_ENABLE3_A, |
2081 | DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0); | |
29aaee65 | 2082 | queue_work(adap->workq, &adap->db_full_task); |
2cc301d2 | 2083 | } |
881806bc VP |
2084 | } |
2085 | ||
2086 | void t4_db_dropped(struct adapter *adap) | |
2087 | { | |
05eb2389 SW |
2088 | if (is_t4(adap->params.chip)) { |
2089 | disable_dbs(adap); | |
2090 | notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); | |
2091 | } | |
29aaee65 | 2092 | queue_work(adap->workq, &adap->db_drop_task); |
881806bc VP |
2093 | } |
2094 | ||
0fbc81b3 HS |
2095 | void t4_register_netevent_notifier(void) |
2096 | { | |
b8ff05a9 DM |
2097 | if (!netevent_registered) { |
2098 | register_netevent_notifier(&cxgb4_netevent_nb); | |
2099 | netevent_registered = true; | |
2100 | } | |
b8ff05a9 DM |
2101 | } |
2102 | ||
2103 | static void detach_ulds(struct adapter *adap) | |
2104 | { | |
2105 | unsigned int i; | |
2106 | ||
2107 | mutex_lock(&uld_mutex); | |
2108 | list_del(&adap->list_node); | |
6a146f3a | 2109 | |
b8ff05a9 | 2110 | for (i = 0; i < CXGB4_ULD_MAX; i++) |
6a146f3a | 2111 | if (adap->uld && adap->uld[i].handle) |
94cdb8bb HS |
2112 | adap->uld[i].state_change(adap->uld[i].handle, |
2113 | CXGB4_STATE_DETACH); | |
6a146f3a | 2114 | |
b8ff05a9 DM |
2115 | if (netevent_registered && list_empty(&adapter_list)) { |
2116 | unregister_netevent_notifier(&cxgb4_netevent_nb); | |
2117 | netevent_registered = false; | |
2118 | } | |
2119 | mutex_unlock(&uld_mutex); | |
2120 | } | |
2121 | ||
2122 | static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) | |
2123 | { | |
2124 | unsigned int i; | |
2125 | ||
2126 | mutex_lock(&uld_mutex); | |
2127 | for (i = 0; i < CXGB4_ULD_MAX; i++) | |
94cdb8bb HS |
2128 | if (adap->uld && adap->uld[i].handle) |
2129 | adap->uld[i].state_change(adap->uld[i].handle, | |
2130 | new_state); | |
b8ff05a9 DM |
2131 | mutex_unlock(&uld_mutex); |
2132 | } | |
2133 | ||
1bb60376 | 2134 | #if IS_ENABLED(CONFIG_IPV6) |
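| /* IPv6 address notifier: install or release hardware CLIP entries as | |
| * IPv6 addresses are added to or removed from our (possibly VLAN- or | |
| * bond-encapsulated) net devices. | |
| */ | |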
b5a02f50 AB |
2135 | static int cxgb4_inet6addr_handler(struct notifier_block *this, |
2136 | unsigned long event, void *data) | |
01bcca68 | 2137 | { |
b5a02f50 AB |
2138 | struct inet6_ifaddr *ifa = data; |
2139 | struct net_device *event_dev = ifa->idev->dev; | |
2140 | const struct device *parent = NULL; | |
2141 | #if IS_ENABLED(CONFIG_BONDING) | |
01bcca68 | 2142 | struct adapter *adap; |
b5a02f50 | 2143 | #endif |
d0d7b10b | 2144 | if (is_vlan_dev(event_dev)) |
b5a02f50 AB |
2145 | event_dev = vlan_dev_real_dev(event_dev); |
2146 | #if IS_ENABLED(CONFIG_BONDING) | |
2147 | if (event_dev->flags & IFF_MASTER) { | |
2148 | list_for_each_entry(adap, &adapter_list, list_node) { | |
2149 | switch (event) { | |
2150 | case NETDEV_UP: | |
2151 | cxgb4_clip_get(adap->port[0], | |
2152 | (const u32 *)ifa, 1); | |
2153 | break; | |
2154 | case NETDEV_DOWN: | |
2155 | cxgb4_clip_release(adap->port[0], | |
2156 | (const u32 *)ifa, 1); | |
2157 | break; | |
2158 | default: | |
2159 | break; | |
2160 | } | |
2161 | } | |
2162 | return NOTIFY_OK; | |
2163 | } | |
2164 | #endif | |
01bcca68 | 2165 | |
b5a02f50 AB |
2166 | if (event_dev) |
2167 | parent = event_dev->dev.parent; | |
01bcca68 | 2168 | |
b5a02f50 | 2169 | if (parent && parent->driver == &cxgb4_driver.driver) { |
01bcca68 VP |
2170 | switch (event) { |
2171 | case NETDEV_UP: | |
b5a02f50 | 2172 | cxgb4_clip_get(event_dev, (const u32 *)ifa, 1); |
01bcca68 VP |
2173 | break; |
2174 | case NETDEV_DOWN: | |
b5a02f50 | 2175 | cxgb4_clip_release(event_dev, (const u32 *)ifa, 1); |
01bcca68 VP |
2176 | break; |
2177 | default: | |
2178 | break; | |
2179 | } | |
2180 | } | |
b5a02f50 | 2181 | return NOTIFY_OK; |
01bcca68 VP |
2182 | } |
2183 | ||
b5a02f50 | 2184 | static bool inet6addr_registered; |
01bcca68 VP |
2185 | static struct notifier_block cxgb4_inet6addr_notifier = { |
2186 | .notifier_call = cxgb4_inet6addr_handler | |
2187 | }; | |
2188 | ||
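| /* Re-sync the hardware CLIP table with the IPv6 addresses of every | |
| * port's root device. | |
| */ | |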
01bcca68 VP |
2189 | static void update_clip(const struct adapter *adap) |
2190 | { | |
2191 | int i; | |
2192 | struct net_device *dev; | |
2193 | int ret; | |
2194 | ||
2195 | rcu_read_lock(); | |
2196 | ||
2197 | for (i = 0; i < MAX_NPORTS; i++) { | |
2198 | dev = adap->port[i]; | |
2199 | ret = 0; | |
2200 | ||
2201 | if (dev) | |
b5a02f50 | 2202 | ret = cxgb4_update_root_dev_clip(dev); |
01bcca68 VP |
2203 | |
2204 | if (ret < 0) | |
2205 | break; | |
2206 | } | |
2207 | rcu_read_unlock(); | |
2208 | } | |
1bb60376 | 2209 | #endif /* IS_ENABLED(CONFIG_IPV6) */ |
01bcca68 | 2210 | |
b8ff05a9 DM |
2211 | /** |
2212 | * cxgb_up - enable the adapter | |
2213 | * @adap: adapter being enabled | |
2214 | * | |
2215 | * Called when the first port is enabled, this function performs the | |
2216 | * actions necessary to make an adapter operational, such as completing | |
2217 | * the initialization of HW modules, and enabling interrupts. | |
2218 | * | |
2219 | * Must be called with the rtnl lock held. | |
2220 | */ | |
2221 | static int cxgb_up(struct adapter *adap) | |
2222 | { | |
aaefae9b | 2223 | int err; |
b8ff05a9 | 2224 | |
91060381 | 2225 | mutex_lock(&uld_mutex); |
aaefae9b DM |
2226 | err = setup_sge_queues(adap); |
2227 | if (err) | |
91060381 | 2228 | goto rel_lock; |
aaefae9b DM |
2229 | err = setup_rss(adap); |
2230 | if (err) | |
2231 | goto freeq; | |
b8ff05a9 DM |
2232 | |
2233 | if (adap->flags & USING_MSIX) { | |
aaefae9b | 2234 | name_msix_vecs(adap); |
b8ff05a9 DM |
2235 | err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, |
2236 | adap->msix_info[0].desc, adap); | |
2237 | if (err) | |
2238 | goto irq_err; | |
b8ff05a9 DM |
2239 | err = request_msix_queue_irqs(adap); |
2240 | if (err) { | |
2241 | free_irq(adap->msix_info[0].vec, adap); | |
2242 | goto irq_err; | |
2243 | } | |
2244 | } else { | |
2245 | err = request_irq(adap->pdev->irq, t4_intr_handler(adap), | |
2246 | (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, | |
b1a3c2b6 | 2247 | adap->port[0]->name, adap); |
b8ff05a9 DM |
2248 | if (err) |
2249 | goto irq_err; | |
2250 | } | |
e7519f99 | 2251 | |
b8ff05a9 DM |
2252 | enable_rx(adap); |
2253 | t4_sge_start(adap); | |
2254 | t4_intr_enable(adap); | |
aaefae9b | 2255 | adap->flags |= FULL_INIT_DONE; |
e7519f99 GG |
2256 | mutex_unlock(&uld_mutex); |
2257 | ||
b8ff05a9 | 2258 | notify_ulds(adap, CXGB4_STATE_UP); |
1bb60376 | 2259 | #if IS_ENABLED(CONFIG_IPV6) |
01bcca68 | 2260 | update_clip(adap); |
1bb60376 | 2261 | #endif |
fc08a01a HS |
2262 | /* Initialize hash mac addr list*/ |
2263 | INIT_LIST_HEAD(&adap->mac_hlist); | |
b8ff05a9 | 2264 | return err; |
91060381 | 2265 | |
b8ff05a9 DM |
2266 | irq_err: |
2267 | dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); | |
aaefae9b DM |
2268 | freeq: |
2269 | t4_free_sge_resources(adap); | |
91060381 RR |
2270 | rel_lock: |
2271 | mutex_unlock(&uld_mutex); | |
2272 | return err; | |
b8ff05a9 DM |
2273 | } |
2274 | ||
2275 | static void cxgb_down(struct adapter *adapter) | |
2276 | { | |
b8ff05a9 | 2277 | cancel_work_sync(&adapter->tid_release_task); |
881806bc VP |
2278 | cancel_work_sync(&adapter->db_full_task); |
2279 | cancel_work_sync(&adapter->db_drop_task); | |
b8ff05a9 | 2280 | adapter->tid_release_task_busy = false; |
204dc3c0 | 2281 | adapter->tid_release_head = NULL; |
b8ff05a9 | 2282 | |
aaefae9b DM |
2283 | t4_sge_stop(adapter); |
2284 | t4_free_sge_resources(adapter); | |
2285 | adapter->flags &= ~FULL_INIT_DONE; | |
b8ff05a9 DM |
2286 | } |
2287 | ||
2288 | /* | |
2289 | * net_device operations | |
2290 | */ | |
2291 | static int cxgb_open(struct net_device *dev) | |
2292 | { | |
2293 | int err; | |
2294 | struct port_info *pi = netdev_priv(dev); | |
2295 | struct adapter *adapter = pi->adapter; | |
2296 | ||
6a3c869a DM |
2297 | netif_carrier_off(dev); |
2298 | ||
aaefae9b DM |
2299 | if (!(adapter->flags & FULL_INIT_DONE)) { |
2300 | err = cxgb_up(adapter); | |
2301 | if (err < 0) | |
2302 | return err; | |
2303 | } | |
b8ff05a9 | 2304 | |
2061ec3f GG |
2305 | /* It's possible that the basic port information could have |
2306 | * changed since we first read it. | |
2307 | */ | |
2308 | err = t4_update_port_info(pi); | |
2309 | if (err < 0) | |
2310 | return err; | |
2311 | ||
f68707b8 DM |
2312 | err = link_start(dev); |
2313 | if (!err) | |
2314 | netif_tx_start_all_queues(dev); | |
2315 | return err; | |
b8ff05a9 DM |
2316 | } |
2317 | ||
2318 | static int cxgb_close(struct net_device *dev) | |
2319 | { | |
b8ff05a9 DM |
2320 | struct port_info *pi = netdev_priv(dev); |
2321 | struct adapter *adapter = pi->adapter; | |
ba581f77 | 2322 | int ret; |
b8ff05a9 DM |
2323 | |
2324 | netif_tx_stop_all_queues(dev); | |
2325 | netif_carrier_off(dev); | |
ba581f77 GG |
2326 | ret = t4_enable_vi(adapter, adapter->pf, pi->viid, false, false); |
2327 | #ifdef CONFIG_CHELSIO_T4_DCB | |
2328 | cxgb4_dcb_reset(dev); | |
2329 | dcb_tx_queue_prio_enable(dev, false); | |
2330 | #endif | |
2331 | return ret; | |
b8ff05a9 DM |
2332 | } |
2333 | ||
dca4faeb | 2334 | int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, |
793dad94 VP |
2335 | __be32 sip, __be16 sport, __be16 vlan, |
2336 | unsigned int queue, unsigned char port, unsigned char mask) | |
dca4faeb VP |
2337 | { |
2338 | int ret; | |
2339 | struct filter_entry *f; | |
2340 | struct adapter *adap; | |
2341 | int i; | |
2342 | u8 *val; | |
2343 | ||
2344 | adap = netdev2adap(dev); | |
2345 | ||
1cab775c | 2346 | /* Adjust stid to correct filter index */ |
470c60c4 | 2347 | stid -= adap->tids.sftid_base; |
1cab775c VP |
2348 | stid += adap->tids.nftids; |
2349 | ||
dca4faeb VP |
2350 | /* Check to make sure the filter requested is writable ... |
2351 | */ | |
2352 | f = &adap->tids.ftid_tab[stid]; | |
2353 | ret = writable_filter(f); | |
2354 | if (ret) | |
2355 | return ret; | |
2356 | ||
2357 | /* Clear out any old resources being used by the filter before | |
2358 | * we start constructing the new filter. | |
2359 | */ | |
2360 | if (f->valid) | |
2361 | clear_filter(adap, f); | |
2362 | ||
2363 | /* Clear out filter specifications */ | |
2364 | memset(&f->fs, 0, sizeof(struct ch_filter_specification)); | |
2365 | f->fs.val.lport = cpu_to_be16(sport); | |
2366 | f->fs.mask.lport = ~0; | |
2367 | val = (u8 *)&sip; | |
793dad94 | 2368 | if ((val[0] | val[1] | val[2] | val[3]) != 0) { |
dca4faeb VP |
2369 | for (i = 0; i < 4; i++) { |
2370 | f->fs.val.lip[i] = val[i]; | |
2371 | f->fs.mask.lip[i] = ~0; | |
2372 | } | |
0d804338 | 2373 | if (adap->params.tp.vlan_pri_map & PORT_F) { |
793dad94 VP |
2374 | f->fs.val.iport = port; |
2375 | f->fs.mask.iport = mask; | |
2376 | } | |
2377 | } | |
dca4faeb | 2378 | |
0d804338 | 2379 | if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { |
7c89e555 KS |
2380 | f->fs.val.proto = IPPROTO_TCP; |
2381 | f->fs.mask.proto = ~0; | |
2382 | } | |
2383 | ||
dca4faeb VP |
2384 | f->fs.dirsteer = 1; |
2385 | f->fs.iq = queue; | |
2386 | /* Mark filter as locked */ | |
2387 | f->locked = 1; | |
2388 | f->fs.rpttid = 1; | |
2389 | ||
6b254afd GG |
2390 | /* Save the actual tid. We need this to get the corresponding |
2391 | * filter entry structure in filter_rpl. | |
2392 | */ | |
2393 | f->tid = stid + adap->tids.ftid_base; | |
dca4faeb VP |
2394 | ret = set_filter_wr(adap, stid); |
2395 | if (ret) { | |
2396 | clear_filter(adap, f); | |
2397 | return ret; | |
2398 | } | |
2399 | ||
2400 | return 0; | |
2401 | } | |
2402 | EXPORT_SYMBOL(cxgb4_create_server_filter); | |
2403 | ||
2404 | int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, | |
2405 | unsigned int queue, bool ipv6) | |
2406 | { | |
dca4faeb VP |
2407 | struct filter_entry *f; |
2408 | struct adapter *adap; | |
2409 | ||
2410 | adap = netdev2adap(dev); | |
1cab775c VP |
2411 | |
2412 | /* Adjust stid to correct filter index */ | |
470c60c4 | 2413 | stid -= adap->tids.sftid_base; |
1cab775c VP |
2414 | stid += adap->tids.nftids; |
2415 | ||
dca4faeb VP |
2416 | f = &adap->tids.ftid_tab[stid]; |
2417 | /* Unlock the filter */ | |
2418 | f->locked = 0; | |
2419 | ||
8c14846d | 2420 | return delete_filter(adap, stid); |
dca4faeb VP |
2421 | } |
2422 | EXPORT_SYMBOL(cxgb4_remove_server_filter); | |
2423 | ||
bc1f4470 | 2424 | static void cxgb_get_stats(struct net_device *dev, |
2425 | struct rtnl_link_stats64 *ns) | |
b8ff05a9 DM |
2426 | { |
2427 | struct port_stats stats; | |
2428 | struct port_info *p = netdev_priv(dev); | |
2429 | struct adapter *adapter = p->adapter; | |
b8ff05a9 | 2430 | |
9fe6cb58 GS |
2431 | /* Block retrieving statistics during EEH error |
2432 | * recovery. Otherwise, the recovery might fail | |
2433 | * and the PCI device will be removed permanently. | |
2434 | */ | |
b8ff05a9 | 2435 | spin_lock(&adapter->stats_lock); |
9fe6cb58 GS |
2436 | if (!netif_device_present(dev)) { |
2437 | spin_unlock(&adapter->stats_lock); | |
bc1f4470 | 2438 | return; |
9fe6cb58 | 2439 | } |
a4cfd929 HS |
2440 | t4_get_port_stats_offset(adapter, p->tx_chan, &stats, |
2441 | &p->stats_base); | |
b8ff05a9 DM |
2442 | spin_unlock(&adapter->stats_lock); |
2443 | ||
2444 | ns->tx_bytes = stats.tx_octets; | |
2445 | ns->tx_packets = stats.tx_frames; | |
2446 | ns->rx_bytes = stats.rx_octets; | |
2447 | ns->rx_packets = stats.rx_frames; | |
2448 | ns->multicast = stats.rx_mcast_frames; | |
2449 | ||
2450 | /* detailed rx_errors */ | |
2451 | ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + | |
2452 | stats.rx_runt; | |
2453 | ns->rx_over_errors = 0; | |
2454 | ns->rx_crc_errors = stats.rx_fcs_err; | |
2455 | ns->rx_frame_errors = stats.rx_symbol_err; | |
b93f79be | 2456 | ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 + |
b8ff05a9 DM |
2457 | stats.rx_ovflow2 + stats.rx_ovflow3 + |
2458 | stats.rx_trunc0 + stats.rx_trunc1 + | |
2459 | stats.rx_trunc2 + stats.rx_trunc3; | |
2460 | ns->rx_missed_errors = 0; | |
2461 | ||
2462 | /* detailed tx_errors */ | |
2463 | ns->tx_aborted_errors = 0; | |
2464 | ns->tx_carrier_errors = 0; | |
2465 | ns->tx_fifo_errors = 0; | |
2466 | ns->tx_heartbeat_errors = 0; | |
2467 | ns->tx_window_errors = 0; | |
2468 | ||
2469 | ns->tx_errors = stats.tx_error_frames; | |
2470 | ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + | |
2471 | ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; | |
b8ff05a9 DM |
2472 | } |
2473 | ||
2474 | static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) | |
2475 | { | |
060e0c75 | 2476 | unsigned int mbox; |
b8ff05a9 DM |
2477 | int ret = 0, prtad, devad; |
2478 | struct port_info *pi = netdev_priv(dev); | |
a4569504 | 2479 | struct adapter *adapter = pi->adapter; |
b8ff05a9 DM |
2480 | struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; |
2481 | ||
2482 | switch (cmd) { | |
2483 | case SIOCGMIIPHY: | |
2484 | if (pi->mdio_addr < 0) | |
2485 | return -EOPNOTSUPP; | |
2486 | data->phy_id = pi->mdio_addr; | |
2487 | break; | |
2488 | case SIOCGMIIREG: | |
2489 | case SIOCSMIIREG: | |
2490 | if (mdio_phy_id_is_c45(data->phy_id)) { | |
2491 | prtad = mdio_phy_id_prtad(data->phy_id); | |
2492 | devad = mdio_phy_id_devad(data->phy_id); | |
2493 | } else if (data->phy_id < 32) { | |
2494 | prtad = data->phy_id; | |
2495 | devad = 0; | |
2496 | data->reg_num &= 0x1f; | |
2497 | } else | |
2498 | return -EINVAL; | |
2499 | ||
b2612722 | 2500 | mbox = pi->adapter->pf; |
b8ff05a9 | 2501 | if (cmd == SIOCGMIIREG) |
060e0c75 | 2502 | ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, |
b8ff05a9 DM |
2503 | data->reg_num, &data->val_out); |
2504 | else | |
060e0c75 | 2505 | ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, |
b8ff05a9 DM |
2506 | data->reg_num, data->val_in); |
2507 | break; | |
5e2a5ebc HS |
2508 | case SIOCGHWTSTAMP: |
2509 | return copy_to_user(req->ifr_data, &pi->tstamp_config, | |
2510 | sizeof(pi->tstamp_config)) ? | |
2511 | -EFAULT : 0; | |
2512 | case SIOCSHWTSTAMP: | |
2513 | if (copy_from_user(&pi->tstamp_config, req->ifr_data, | |
2514 | sizeof(pi->tstamp_config))) | |
2515 | return -EFAULT; | |
2516 | ||
a4569504 AG |
2517 | if (!is_t4(adapter->params.chip)) { |
2518 | switch (pi->tstamp_config.tx_type) { | |
2519 | case HWTSTAMP_TX_OFF: | |
2520 | case HWTSTAMP_TX_ON: | |
2521 | break; | |
2522 | default: | |
2523 | return -ERANGE; | |
2524 | } | |
2525 | ||
2526 | switch (pi->tstamp_config.rx_filter) { | |
2527 | case HWTSTAMP_FILTER_NONE: | |
2528 | pi->rxtstamp = false; | |
2529 | break; | |
2530 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | |
2531 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | |
2532 | cxgb4_ptprx_timestamping(pi, pi->port_id, | |
2533 | PTP_TS_L4); | |
2534 | break; | |
2535 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | |
2536 | cxgb4_ptprx_timestamping(pi, pi->port_id, | |
2537 | PTP_TS_L2_L4); | |
2538 | break; | |
2539 | case HWTSTAMP_FILTER_ALL: | |
2540 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | |
2541 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | |
2542 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
2543 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
2544 | pi->rxtstamp = true; | |
2545 | break; | |
2546 | default: | |
2547 | pi->tstamp_config.rx_filter = | |
2548 | HWTSTAMP_FILTER_NONE; | |
2549 | return -ERANGE; | |
2550 | } | |
2551 | ||
2552 | if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) && | |
2553 | (pi->tstamp_config.rx_filter == | |
2554 | HWTSTAMP_FILTER_NONE)) { | |
2555 | if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) | |
2556 | pi->ptp_enable = false; | |
2557 | } | |
2558 | ||
2559 | if (pi->tstamp_config.rx_filter != | |
2560 | HWTSTAMP_FILTER_NONE) { | |
2561 | if (cxgb4_ptp_redirect_rx_packet(adapter, | |
2562 | pi) >= 0) | |
2563 | pi->ptp_enable = true; | |
2564 | } | |
2565 | } else { | |
2566 | /* For T4 Adapters */ | |
2567 | switch (pi->tstamp_config.rx_filter) { | |
2568 | case HWTSTAMP_FILTER_NONE: | |
5e2a5ebc HS |
2569 | pi->rxtstamp = false; |
2570 | break; | |
a4569504 | 2571 | case HWTSTAMP_FILTER_ALL: |
5e2a5ebc HS |
2572 | pi->rxtstamp = true; |
2573 | break; | |
a4569504 AG |
2574 | default: |
2575 | pi->tstamp_config.rx_filter = | |
2576 | HWTSTAMP_FILTER_NONE; | |
5e2a5ebc | 2577 | return -ERANGE; |
a4569504 | 2578 | } |
5e2a5ebc | 2579 | } |
5e2a5ebc HS |
2580 | return copy_to_user(req->ifr_data, &pi->tstamp_config, |
2581 | sizeof(pi->tstamp_config)) ? | |
2582 | -EFAULT : 0; | |
b8ff05a9 DM |
2583 | default: |
2584 | return -EOPNOTSUPP; | |
2585 | } | |
2586 | return ret; | |
2587 | } | |
2588 | ||
2589 | static void cxgb_set_rxmode(struct net_device *dev) | |
2590 | { | |
2591 | /* unfortunately we can't return errors to the stack */ | |
2592 | set_rxmode(dev, -1, false); | |
2593 | } | |
2594 | ||
2595 | static int cxgb_change_mtu(struct net_device *dev, int new_mtu) | |
2596 | { | |
2597 | int ret; | |
2598 | struct port_info *pi = netdev_priv(dev); | |
2599 | ||
b2612722 | 2600 | ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1, |
060e0c75 | 2601 | -1, -1, -1, true); |
b8ff05a9 DM |
2602 | if (!ret) |
2603 | dev->mtu = new_mtu; | |
2604 | return ret; | |
2605 | } | |
2606 | ||
858aa65c | 2607 | #ifdef CONFIG_PCI_IOV |
e7b48a32 HS |
2608 | static int dummy_open(struct net_device *dev) |
2609 | { | |
2610 | /* Turn carrier off since we don't have to transmit anything on this | |
2611 | * interface. | |
2612 | */ | |
2613 | netif_carrier_off(dev); | |
2614 | return 0; | |
2615 | } | |
2616 | ||
661dbeb9 HS |
2617 | /* Fill MAC address that will be assigned by the FW */ |
2618 | static void fill_vf_station_mac_addr(struct adapter *adap) | |
2619 | { | |
2620 | unsigned int i; | |
2621 | u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN]; | |
2622 | int err; | |
2623 | u8 *na; | |
2624 | u16 a, b; | |
2625 | ||
2626 | err = t4_get_raw_vpd_params(adap, &adap->params.vpd); | |
2627 | if (!err) { | |
2628 | na = adap->params.vpd.na; | |
2629 | for (i = 0; i < ETH_ALEN; i++) | |
2630 | hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 + | |
2631 | hex2val(na[2 * i + 1])); | |
2632 | a = (hw_addr[0] << 8) | hw_addr[1]; | |
2633 | b = (hw_addr[1] << 8) | hw_addr[2]; | |
2634 | a ^= b; | |
2635 | a |= 0x0200; /* locally assigned Ethernet MAC address */ | |
2636 | a &= ~0x0100; /* not a multicast Ethernet MAC address */ | |
2637 | macaddr[0] = a >> 8; | |
2638 | macaddr[1] = a & 0xff; | |
2639 | ||
2640 | for (i = 2; i < 5; i++) | |
2641 | macaddr[i] = hw_addr[i + 1]; | |
2642 | ||
2643 | for (i = 0; i < adap->num_vfs; i++) { | |
2644 | macaddr[5] = adap->pf * 16 + i; | |
2645 | ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr); | |
2646 | } | |
2647 | } | |
2648 | } | |
2649 | ||
858aa65c HS |
2650 | static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac) |
2651 | { | |
2652 | struct port_info *pi = netdev_priv(dev); | |
2653 | struct adapter *adap = pi->adapter; | |
661dbeb9 | 2654 | int ret; |
858aa65c HS |
2655 | |
2656 | /* verify MAC addr is valid */ | |
2657 | if (!is_valid_ether_addr(mac)) { | |
2658 | dev_err(pi->adapter->pdev_dev, | |
2659 | "Invalid Ethernet address %pM for VF %d\n", | |
2660 | mac, vf); | |
2661 | return -EINVAL; | |
2662 | } | |
2663 | ||
2664 | dev_info(pi->adapter->pdev_dev, | |
2665 | "Setting MAC %pM on VF %d\n", mac, vf); | |
661dbeb9 HS |
2666 | ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); |
2667 | if (!ret) | |
2668 | ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); | |
2669 | return ret; | |
2670 | } | |
2671 | ||
2672 | static int cxgb_get_vf_config(struct net_device *dev, | |
2673 | int vf, struct ifla_vf_info *ivi) | |
2674 | { | |
2675 | struct port_info *pi = netdev_priv(dev); | |
2676 | struct adapter *adap = pi->adapter; | |
2677 | ||
2678 | if (vf >= adap->num_vfs) | |
2679 | return -EINVAL; | |
2680 | ivi->vf = vf; | |
8ea4fae9 GG |
2681 | ivi->max_tx_rate = adap->vfinfo[vf].tx_rate; |
2682 | ivi->min_tx_rate = 0; | |
661dbeb9 HS |
2683 | ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); |
2684 | return 0; | |
858aa65c | 2685 | } |
96fe11f2 GG |
2686 | |
2687 | static int cxgb_get_phys_port_id(struct net_device *dev, | |
2688 | struct netdev_phys_item_id *ppid) | |
2689 | { | |
2690 | struct port_info *pi = netdev_priv(dev); | |
2691 | unsigned int phy_port_id; | |
2692 | ||
2693 | phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; | |
2694 | ppid->id_len = sizeof(phy_port_id); | |
2695 | memcpy(ppid->id, &phy_port_id, ppid->id_len); | |
2696 | return 0; | |
2697 | } | |
2698 | ||
8ea4fae9 GG |
2699 | static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, |
2700 | int max_tx_rate) | |
2701 | { | |
2702 | struct port_info *pi = netdev_priv(dev); | |
2703 | struct adapter *adap = pi->adapter; | |
c3168cab | 2704 | unsigned int link_ok, speed, mtu; |
8ea4fae9 GG |
2705 | u32 fw_pfvf, fw_class; |
2706 | int class_id = vf; | |
c3168cab | 2707 | int ret; |
8ea4fae9 GG |
2708 | u16 pktsize; |
2709 | ||
2710 | if (vf >= adap->num_vfs) | |
2711 | return -EINVAL; | |
2712 | ||
2713 | if (min_tx_rate) { | |
2714 | dev_err(adap->pdev_dev, | |
2715 | "Min tx rate (%d) (> 0) for VF %d is Invalid.\n", | |
2716 | min_tx_rate, vf); | |
2717 | return -EINVAL; | |
2718 | } | |
c3168cab GG |
2719 | |
2720 | ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); | |
8ea4fae9 GG |
2721 | if (ret != FW_SUCCESS) { |
2722 | dev_err(adap->pdev_dev, | |
c3168cab | 2723 | "Failed to get link information for VF %d\n", vf); |
8ea4fae9 GG |
2724 | return -EINVAL; |
2725 | } | |
c3168cab | 2726 | |
8ea4fae9 GG |
2727 | if (!link_ok) { |
2728 | dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); | |
2729 | return -EINVAL; | |
2730 | } | |
8ea4fae9 GG |
2731 | |
2732 | if (max_tx_rate > speed) { | |
2733 | dev_err(adap->pdev_dev, | |
2734 | "Max tx rate %d for VF %d can't be > link-speed %u", | |
2735 | max_tx_rate, vf, speed); | |
2736 | return -EINVAL; | |
2737 | } | |
c3168cab GG |
2738 | |
2739 | pktsize = mtu; | |
8ea4fae9 GG |
2740 | /* subtract ethhdr size and 4 bytes CRC, since the f/w appends them */ | |
2741 | pktsize = pktsize - sizeof(struct ethhdr) - 4; | |
2742 | /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */ | |
2743 | pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr); | |
2744 | /* configure Traffic Class for rate-limiting */ | |
2745 | ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET, | |
2746 | SCHED_CLASS_LEVEL_CL_RL, | |
2747 | SCHED_CLASS_MODE_CLASS, | |
2748 | SCHED_CLASS_RATEUNIT_BITS, | |
2749 | SCHED_CLASS_RATEMODE_ABS, | |
c3168cab | 2750 | pi->tx_chan, class_id, 0, |
8ea4fae9 GG |
2751 | max_tx_rate * 1000, 0, pktsize); |
2752 | if (ret) { | |
2753 | dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", | |
2754 | ret); | |
2755 | return -EINVAL; | |
2756 | } | |
2757 | dev_info(adap->pdev_dev, | |
2758 | "Class %d with MSS %u configured with rate %u\n", | |
2759 | class_id, pktsize, max_tx_rate); | |
2760 | ||
2761 | /* bind VF to configured Traffic Class */ | |
2762 | fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | | |
2763 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH)); | |
2764 | fw_class = class_id; | |
2765 | ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, | |
2766 | &fw_class); | |
2767 | if (ret) { | |
2768 | dev_err(adap->pdev_dev, | |
2769 | "Err %d in binding VF %d to Traffic Class %d\n", | |
2770 | ret, vf, class_id); | |
2771 | return -EINVAL; | |
2772 | } | |
2773 | dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", | |
2774 | adap->pf, vf, class_id); | |
2775 | adap->vfinfo[vf].tx_rate = max_tx_rate; | |
2776 | return 0; | |
2777 | } | |
2778 | ||
858aa65c HS |
2779 | #endif |
2780 | ||
b8ff05a9 DM |
2781 | static int cxgb_set_mac_addr(struct net_device *dev, void *p) |
2782 | { | |
2783 | int ret; | |
2784 | struct sockaddr *addr = p; | |
2785 | struct port_info *pi = netdev_priv(dev); | |
2786 | ||
2787 | if (!is_valid_ether_addr(addr->sa_data)) | |
504f9b5a | 2788 | return -EADDRNOTAVAIL; |
b8ff05a9 | 2789 | |
b2612722 | 2790 | ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid, |
060e0c75 | 2791 | pi->xact_addr_filt, addr->sa_data, true, true); |
b8ff05a9 DM |
2792 | if (ret < 0) |
2793 | return ret; | |
2794 | ||
2795 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
2796 | pi->xact_addr_filt = ret; | |
2797 | return 0; | |
2798 | } | |
2799 | ||
b8ff05a9 DM |
2800 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2801 | static void cxgb_netpoll(struct net_device *dev) | |
2802 | { | |
2803 | struct port_info *pi = netdev_priv(dev); | |
2804 | struct adapter *adap = pi->adapter; | |
2805 | ||
2806 | if (adap->flags & USING_MSIX) { | |
2807 | int i; | |
2808 | struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; | |
2809 | ||
2810 | for (i = pi->nqsets; i; i--, rx++) | |
2811 | t4_sge_intr_msix(0, &rx->rspq); | |
2812 | } else | |
2813 | t4_intr_handler(adap)(0, adap); | |
2814 | } | |
2815 | #endif | |
2816 | ||
10a2604e RL |
2817 | static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) |
2818 | { | |
2819 | struct port_info *pi = netdev_priv(dev); | |
2820 | struct adapter *adap = pi->adapter; | |
2821 | struct sched_class *e; | |
2822 | struct ch_sched_params p; | |
2823 | struct ch_sched_queue qe; | |
2824 | u32 req_rate; | |
2825 | int err = 0; | |
2826 | ||
2827 | if (!can_sched(dev)) | |
2828 | return -ENOTSUPP; | |
2829 | ||
2830 | if (index < 0 || index > pi->nqsets - 1) | |
2831 | return -EINVAL; | |
2832 | ||
2833 | if (!(adap->flags & FULL_INIT_DONE)) { | |
2834 | dev_err(adap->pdev_dev, | |
2835 | "Failed to rate limit on queue %d. Link Down?\n", | |
2836 | index); | |
2837 | return -EINVAL; | |
2838 | } | |
2839 | ||
2840 | /* Convert from Mbps to Kbps (binary scaling: << 10, i.e. * 1024) */ | |
2841 | req_rate = rate << 10; | |
2842 | ||
2843 | /* Max rate is 10 Gbps */ | |
2844 | if (req_rate >= SCHED_MAX_RATE_KBPS) { | |
2845 | dev_err(adap->pdev_dev, | |
2846 | "Invalid rate %u Mbps, Max rate is %u Gbps\n", | |
2847 | rate, SCHED_MAX_RATE_KBPS); | |
2848 | return -ERANGE; | |
2849 | } | |
2850 | ||
2851 | /* First unbind the queue from any existing class */ | |
2852 | memset(&qe, 0, sizeof(qe)); | |
2853 | qe.queue = index; | |
2854 | qe.class = SCHED_CLS_NONE; | |
2855 | ||
2856 | err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE); | |
2857 | if (err) { | |
2858 | dev_err(adap->pdev_dev, | |
2859 | "Unbinding Queue %d on port %d fail. Err: %d\n", | |
2860 | index, pi->port_id, err); | |
2861 | return err; | |
2862 | } | |
2863 | ||
2864 | /* Queue already unbound */ | |
2865 | if (!req_rate) | |
2866 | return 0; | |
2867 | ||
2868 | /* Fetch any available unused or matching scheduling class */ | |
2869 | memset(&p, 0, sizeof(p)); | |
2870 | p.type = SCHED_CLASS_TYPE_PACKET; | |
2871 | p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; | |
2872 | p.u.params.mode = SCHED_CLASS_MODE_CLASS; | |
2873 | p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS; | |
2874 | p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS; | |
2875 | p.u.params.channel = pi->tx_chan; | |
2876 | p.u.params.class = SCHED_CLS_NONE; | |
2877 | p.u.params.minrate = 0; | |
2878 | p.u.params.maxrate = req_rate; | |
2879 | p.u.params.weight = 0; | |
2880 | p.u.params.pktsize = dev->mtu; | |
2881 | ||
2882 | e = cxgb4_sched_class_alloc(dev, &p); | |
2883 | if (!e) | |
2884 | return -ENOMEM; | |
2885 | ||
2886 | /* Bind the queue to a scheduling class */ | |
2887 | memset(&qe, 0, sizeof(qe)); | |
2888 | qe.queue = index; | |
2889 | qe.class = e->idx; | |
2890 | ||
2891 | err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE); | |
2892 | if (err) | |
2893 | dev_err(adap->pdev_dev, | |
2894 | "Queue rate limiting failed. Err: %d\n", err); | |
2895 | return err; | |
2896 | } | |
2897 | ||
6a345b3d KS |
2898 | static int cxgb_setup_tc_flower(struct net_device *dev, |
2899 | struct tc_cls_flower_offload *cls_flower) | |
2900 | { | |
cd019e91 | 2901 | if (cls_flower->common.chain_index) |
6a345b3d KS |
2902 | return -EOPNOTSUPP; |
2903 | ||
2904 | switch (cls_flower->command) { | |
2905 | case TC_CLSFLOWER_REPLACE: | |
2906 | return cxgb4_tc_flower_replace(dev, cls_flower); | |
2907 | case TC_CLSFLOWER_DESTROY: | |
2908 | return cxgb4_tc_flower_destroy(dev, cls_flower); | |
2909 | case TC_CLSFLOWER_STATS: | |
2910 | return cxgb4_tc_flower_stats(dev, cls_flower); | |
2911 | default: | |
2912 | return -EOPNOTSUPP; | |
2913 | } | |
2914 | } | |
2915 | ||
f7323043 | 2916 | static int cxgb_setup_tc_cls_u32(struct net_device *dev, |
f7323043 JP |
2917 | struct tc_cls_u32_offload *cls_u32) |
2918 | { | |
cd019e91 | 2919 | if (cls_u32->common.chain_index) |
f7323043 JP |
2920 | return -EOPNOTSUPP; |
2921 | ||
2922 | switch (cls_u32->command) { | |
2923 | case TC_CLSU32_NEW_KNODE: | |
2924 | case TC_CLSU32_REPLACE_KNODE: | |
5fd9fc4e | 2925 | return cxgb4_config_knode(dev, cls_u32); |
f7323043 | 2926 | case TC_CLSU32_DELETE_KNODE: |
5fd9fc4e | 2927 | return cxgb4_delete_knode(dev, cls_u32); |
f7323043 JP |
2928 | default: |
2929 | return -EOPNOTSUPP; | |
2930 | } | |
2931 | } | |
2932 | ||
cd019e91 JP |
2933 | static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
2934 | void *cb_priv) | |
d8931847 | 2935 | { |
cd019e91 | 2936 | struct net_device *dev = cb_priv; |
d8931847 RL |
2937 | struct port_info *pi = netdev2pinfo(dev); |
2938 | struct adapter *adap = netdev2adap(dev); | |
2939 | ||
2940 | if (!(adap->flags & FULL_INIT_DONE)) { | |
2941 | dev_err(adap->pdev_dev, | |
2942 | "Failed to setup tc on port %d. Link Down?\n", | |
2943 | pi->port_id); | |
2944 | return -EINVAL; | |
2945 | } | |
2946 | ||
44ae12a7 JP |
2947 | if (!tc_can_offload(dev)) |
2948 | return -EOPNOTSUPP; | |
2949 | ||
f7323043 JP |
2950 | switch (type) { |
2951 | case TC_SETUP_CLSU32: | |
de4784ca | 2952 | return cxgb_setup_tc_cls_u32(dev, type_data); |
6a345b3d KS |
2953 | case TC_SETUP_CLSFLOWER: |
2954 | return cxgb_setup_tc_flower(dev, type_data); | |
f7323043 JP |
2955 | default: |
2956 | return -EOPNOTSUPP; | |
d8931847 | 2957 | } |
d8931847 RL |
2958 | } |
2959 | ||
cd019e91 JP |
2960 | static int cxgb_setup_tc_block(struct net_device *dev, |
2961 | struct tc_block_offload *f) | |
2962 | { | |
2963 | struct port_info *pi = netdev2pinfo(dev); | |
2964 | ||
2965 | if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) | |
2966 | return -EOPNOTSUPP; | |
2967 | ||
2968 | switch (f->command) { | |
2969 | case TC_BLOCK_BIND: | |
2970 | return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb, | |
2971 | pi, dev); | |
2972 | case TC_BLOCK_UNBIND: | |
2973 | tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi); | |
2974 | return 0; | |
2975 | default: | |
2976 | return -EOPNOTSUPP; | |
2977 | } | |
2978 | } | |
2979 | ||
2980 | static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, | |
2981 | void *type_data) | |
2982 | { | |
2983 | switch (type) { | |
cd019e91 JP |
2984 | case TC_SETUP_BLOCK: |
2985 | return cxgb_setup_tc_block(dev, type_data); | |
2986 | default: | |
2987 | return -EOPNOTSUPP; | |
2988 | } | |
2989 | } | |
2990 | ||
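| /* The driver offloads a single VxLAN port, reference-counted across | |
| * IPv4/IPv6 adds.  When the last reference goes away, clear the VxLAN | |
| * type register and free the 'match all' inner-MAC filters. | |
| */ | |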
846eac3f GG |
2991 | static void cxgb_del_udp_tunnel(struct net_device *netdev, |
2992 | struct udp_tunnel_info *ti) | |
2993 | { | |
2994 | struct port_info *pi = netdev_priv(netdev); | |
2995 | struct adapter *adapter = pi->adapter; | |
2996 | unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); | |
2997 | u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; | |
2998 | int ret = 0, i; | |
2999 | ||
3000 | if (chip_ver < CHELSIO_T6) | |
3001 | return; | |
3002 | ||
3003 | switch (ti->type) { | |
3004 | case UDP_TUNNEL_TYPE_VXLAN: | |
3005 | if (!adapter->vxlan_port_cnt || | |
3006 | adapter->vxlan_port != ti->port) | |
3007 | return; /* Invalid VxLAN destination port */ | |
3008 | ||
3009 | adapter->vxlan_port_cnt--; | |
3010 | if (adapter->vxlan_port_cnt) | |
3011 | return; | |
3012 | ||
3013 | adapter->vxlan_port = 0; | |
3014 | t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0); | |
3015 | break; | |
3016 | default: | |
3017 | return; | |
3018 | } | |
3019 | ||
3020 | /* Matchall mac entries can be deleted only after all tunnel ports | |
3021 | * are brought down or removed. | |
3022 | */ | |
3023 | if (!adapter->rawf_cnt) | |
3024 | return; | |
3025 | for_each_port(adapter, i) { | |
3026 | pi = adap2pinfo(adapter, i); | |
3027 | ret = t4_free_raw_mac_filt(adapter, pi->viid, | |
3028 | match_all_mac, match_all_mac, | |
3029 | adapter->rawf_start + | |
3030 | pi->port_id, | |
3031 | 1, pi->port_id, true); | |
3032 | if (ret < 0) { | |
3033 | netdev_info(netdev, "Failed to free mac filter entry for port %d\n", | |
3034 | i); | |
3035 | return; | |
3036 | } | |
3037 | atomic_dec(&adapter->mps_encap[adapter->rawf_start + | |
3038 | pi->port_id].refcnt); | |
3039 | } | |
3040 | } | |
3041 | ||
3042 | static void cxgb_add_udp_tunnel(struct net_device *netdev, | |
3043 | struct udp_tunnel_info *ti) | |
3044 | { | |
3045 | struct port_info *pi = netdev_priv(netdev); | |
3046 | struct adapter *adapter = pi->adapter; | |
3047 | unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); | |
3048 | u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; | |
3049 | int i, ret; | |
3050 | ||
3051 | if (chip_ver < CHELSIO_T6) | |
3052 | return; | |
3053 | ||
3054 | switch (ti->type) { | |
3055 | case UDP_TUNNEL_TYPE_VXLAN: | |
3056 | /* On T6, the firmware reserves the last 2 entries for | |
3057 | * storing the match-all mac filter (config file entry). | |
3058 | */ | |
3059 | if (!adapter->rawf_cnt) | |
3060 | return; | |
3061 | ||
3062 | /* The callback for adding a vxlan port can be invoked with the | |
3063 | * same port for both IPv4 and IPv6. We should not disable the | |
3064 | * offloading when the same port is added for both protocols and | |
3065 | * later one of them is removed. | |
3066 | */ | |
3067 | if (adapter->vxlan_port_cnt && | |
3068 | adapter->vxlan_port == ti->port) { | |
3069 | adapter->vxlan_port_cnt++; | |
3070 | return; | |
3071 | } | |
3072 | ||
3073 | /* We will support only one VxLAN port */ | |
3074 | if (adapter->vxlan_port_cnt) { | |
3075 | netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", | |
3076 | be16_to_cpu(adapter->vxlan_port), | |
3077 | be16_to_cpu(ti->port)); | |
3078 | return; | |
3079 | } | |
3080 | ||
3081 | adapter->vxlan_port = ti->port; | |
3082 | adapter->vxlan_port_cnt = 1; | |
3083 | ||
3084 | t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, | |
3085 | VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); | |
3086 | break; | |
3087 | default: | |
3088 | return; | |
3089 | } | |
3090 | ||
3091 | /* Create a 'match all' mac filter entry for inner mac, | |
3092 | * if the raw mac interface is supported. Once the Linux kernel provides | |
3093 | * driver entry points for adding/deleting the inner mac addresses, | |
3094 | * we will remove this 'match all' entry and fall back to adding | |
3095 | * exact match filters. | |
3096 | */ | |
3097 | if (adapter->rawf_cnt) { | |
3098 | for_each_port(adapter, i) { | |
3099 | pi = adap2pinfo(adapter, i); | |
3100 | ||
3101 | ret = t4_alloc_raw_mac_filt(adapter, pi->viid, | |
3102 | match_all_mac, | |
3103 | match_all_mac, | |
3104 | adapter->rawf_start + | |
3105 | pi->port_id, | |
3106 | 1, pi->port_id, true); | |
3107 | if (ret < 0) { | |
3108 | netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n", | |
3109 | be16_to_cpu(ti->port)); | |
3110 | cxgb_del_udp_tunnel(netdev, ti); | |
3111 | return; | |
3112 | } | |
3113 | atomic_inc(&adapter->mps_encap[ret].refcnt); | |
3114 | } | |
3115 | } | |
3116 | } | |
3117 | ||
90592b9a AV |
3118 | static netdev_features_t cxgb_fix_features(struct net_device *dev, |
3119 | netdev_features_t features) | |
3120 | { | |
3121 | /* Disable GRO if RX_CSUM is disabled: GRO relies on validated receive checksums */ | |
3122 | if (!(features & NETIF_F_RXCSUM)) | |
3123 | features &= ~NETIF_F_GRO; | |
3124 | ||
3125 | return features; | |
3126 | } | |
3127 | ||
b8ff05a9 DM |
3128 | static const struct net_device_ops cxgb4_netdev_ops = { |
3129 | .ndo_open = cxgb_open, | |
3130 | .ndo_stop = cxgb_close, | |
3131 | .ndo_start_xmit = t4_eth_xmit, | |
688848b1 | 3132 | .ndo_select_queue = cxgb_select_queue, |
9be793bf | 3133 | .ndo_get_stats64 = cxgb_get_stats, |
b8ff05a9 DM |
3134 | .ndo_set_rx_mode = cxgb_set_rxmode, |
3135 | .ndo_set_mac_address = cxgb_set_mac_addr, | |
2ed28baa | 3136 | .ndo_set_features = cxgb_set_features, |
b8ff05a9 DM |
3137 | .ndo_validate_addr = eth_validate_addr, |
3138 | .ndo_do_ioctl = cxgb_ioctl, | |
3139 | .ndo_change_mtu = cxgb_change_mtu, | |
b8ff05a9 DM |
3140 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3141 | .ndo_poll_controller = cxgb_netpoll, | |
3142 | #endif | |
84a200b3 VP |
3143 | #ifdef CONFIG_CHELSIO_T4_FCOE |
3144 | .ndo_fcoe_enable = cxgb_fcoe_enable, | |
3145 | .ndo_fcoe_disable = cxgb_fcoe_disable, | |
3146 | #endif /* CONFIG_CHELSIO_T4_FCOE */ | |
10a2604e | 3147 | .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, |
d8931847 | 3148 | .ndo_setup_tc = cxgb_setup_tc, |
846eac3f GG |
3149 | .ndo_udp_tunnel_add = cxgb_add_udp_tunnel, |
3150 | .ndo_udp_tunnel_del = cxgb_del_udp_tunnel, | |
90592b9a | 3151 | .ndo_fix_features = cxgb_fix_features, |
b8ff05a9 DM |
3152 | }; |
3153 | ||
858aa65c | 3154 | #ifdef CONFIG_PCI_IOV |
e7b48a32 HS |
3155 | static const struct net_device_ops cxgb4_mgmt_netdev_ops = { |
3156 | .ndo_open = dummy_open, | |
858aa65c | 3157 | .ndo_set_vf_mac = cxgb_set_vf_mac, |
661dbeb9 | 3158 | .ndo_get_vf_config = cxgb_get_vf_config, |
8ea4fae9 | 3159 | .ndo_set_vf_rate = cxgb_set_vf_rate, |
96fe11f2 | 3160 | .ndo_get_phys_port_id = cxgb_get_phys_port_id, |
7829451c | 3161 | }; |
e7b48a32 | 3162 | #endif |
7829451c HS |
3163 | |
3164 | static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |
3165 | { | |
3166 | struct adapter *adapter = netdev2adap(dev); | |
3167 | ||
3168 | strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); | |
3169 | strlcpy(info->version, cxgb4_driver_version, | |
3170 | sizeof(info->version)); | |
3171 | strlcpy(info->bus_info, pci_name(adapter->pdev), | |
3172 | sizeof(info->bus_info)); | |
3173 | } | |
3174 | ||
3175 | static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { | |
3176 | .get_drvinfo = get_drvinfo, | |
3177 | }; | |
3178 | ||
b8ff05a9 DM |
3179 | void t4_fatal_err(struct adapter *adap) |
3180 | { | |
3be0679b HS |
3181 | int port; |
3182 | ||
025d0973 GP |
3183 | if (pci_channel_offline(adap->pdev)) |
3184 | return; | |
3185 | ||
3be0679b HS |
3186 | /* Disable the SGE since ULDs are going to free resources that |
3187 | * could be exposed to the adapter. RDMA MWs for example... | |
3188 | */ | |
3189 | t4_shutdown_adapter(adap); | |
3190 | for_each_port(adap, port) { | |
3191 | struct net_device *dev = adap->port[port]; | |
3192 | ||
3193 | /* If we get here in very early initialization the network | |
3194 | * devices may not have been set up yet. | |
3195 | */ | |
3196 | if (!dev) | |
3197 | continue; | |
3198 | ||
3199 | netif_tx_stop_all_queues(dev); | |
3200 | netif_carrier_off(dev); | |
3201 | } | |
b8ff05a9 DM |
3202 | dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); |
3203 | } | |
3204 | ||
3205 | static void setup_memwin(struct adapter *adap) | |
3206 | { | |
b562fc37 | 3207 | u32 nic_win_base = t4_get_util_window(adap); |
b8ff05a9 | 3208 | |
b562fc37 | 3209 | t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC); |
636f9d37 VP |
3210 | } |
3211 | ||
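/* If the RDMA on-chip queue (OCQ) region exists, program PCIe memory
 * access window 3 to cover it so the RDMA ULD can reach on-chip queue
 * memory; the trailing register read-back presumably flushes the
 * window programming before first use.
 */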
3212 | static void setup_memwin_rdma(struct adapter *adap) | |
3213 | { | |
1ae970e0 | 3214 | if (adap->vres.ocq.size) { |
0abfd152 HS |
3215 | u32 start; |
3216 | unsigned int sz_kb; | |
1ae970e0 | 3217 | |
0abfd152 HS |
3218 | start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); |
3219 | start &= PCI_BASE_ADDRESS_MEM_MASK; | |
3220 | start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); | |
1ae970e0 DM |
3221 | sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; |
3222 | t4_write_reg(adap, | |
f061de42 HS |
3223 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3), |
3224 | start | BIR_V(1) | WINDOW_V(ilog2(sz_kb))); | |
1ae970e0 | 3225 | t4_write_reg(adap, |
f061de42 | 3226 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3), |
1ae970e0 DM |
3227 | adap->vres.ocq.start); |
3228 | t4_read_reg(adap, | |
f061de42 | 3229 | PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3)); |
1ae970e0 | 3230 | } |
b8ff05a9 DM |
3231 | } |
3232 | ||
02b5fb8e DM |
3233 | static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) |
3234 | { | |
3235 | u32 v; | |
3236 | int ret; | |
3237 | ||
3238 | /* get device capabilities */ | |
3239 | memset(c, 0, sizeof(*c)); | |
e2ac9628 HS |
3240 | c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
3241 | FW_CMD_REQUEST_F | FW_CMD_READ_F); | |
ce91a923 | 3242 | c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); |
b2612722 | 3243 | ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); |
02b5fb8e DM |
3244 | if (ret < 0) |
3245 | return ret; | |
3246 | ||
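/* Select capabilities: write the capability set back (unchanged) with
 * FW_CMD_WRITE_F so the firmware knows which capabilities we intend
 * to use.
 */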
e2ac9628 HS |
3247 | c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
3248 | FW_CMD_REQUEST_F | FW_CMD_WRITE_F); | |
b2612722 | 3249 | ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); |
02b5fb8e DM |
3250 | if (ret < 0) |
3251 | return ret; | |
3252 | ||
b2612722 | 3253 | ret = t4_config_glbl_rss(adap, adap->pf, |
02b5fb8e | 3254 | FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, |
b2e1a3f0 HS |
3255 | FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | |
3256 | FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F); | |
02b5fb8e DM |
3257 | if (ret < 0) |
3258 | return ret; | |
3259 | ||
b2612722 | 3260 | ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, |
4b8e27a8 HS |
3261 | MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, |
3262 | FW_CMD_CAP_PF); | |
02b5fb8e DM |
3263 | if (ret < 0) |
3264 | return ret; | |
3265 | ||
3266 | t4_sge_init(adap); | |
3267 | ||
02b5fb8e | 3268 | /* tweak some settings */ |
837e4a42 | 3269 | t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); |
0d804338 | 3270 | t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); |
837e4a42 HS |
3271 | t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); |
3272 | v = t4_read_reg(adap, TP_PIO_DATA_A); | |
3273 | t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); | |
060e0c75 | 3274 | |
dca4faeb VP |
3275 | /* first 4 Tx modulation queues point to consecutive Tx channels */ |
3276 | adap->params.tp.tx_modq_map = 0xE4; | |
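/* (0xE4 == 0b11100100, i.e. four 2-bit fields encoding the identity
 * mapping: queue 0 -> channel 0, 1 -> 1, 2 -> 2, 3 -> 3.)
 */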
0d804338 HS |
3277 | t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, |
3278 | TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); | |
dca4faeb VP |
3279 | |
3280 | /* associate each Tx modulation queue with consecutive Tx channels */ | |
3281 | v = 0x84218421; | |
837e4a42 | 3282 | t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, |
0d804338 | 3283 | &v, 1, TP_TX_SCHED_HDR_A); |
837e4a42 | 3284 | t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, |
0d804338 | 3285 | &v, 1, TP_TX_SCHED_FIFO_A); |
837e4a42 | 3286 | t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, |
0d804338 | 3287 | &v, 1, TP_TX_SCHED_PCMD_A); |
dca4faeb VP |
3288 | |
3289 | #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */ | |
3290 | if (is_offload(adap)) { | |
0d804338 HS |
3291 | t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, |
3292 | TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | | |
3293 | TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | | |
3294 | TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | | |
3295 | TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); | |
3296 | t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, | |
3297 | TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | | |
3298 | TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | | |
3299 | TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | | |
3300 | TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); | |
dca4faeb VP |
3301 | } |
3302 | ||
060e0c75 | 3303 | /* get basic stuff going */ |
b2612722 | 3304 | return t4_early_init(adap, adap->pf); |
02b5fb8e DM |
3305 | } |
3306 | ||
b8ff05a9 DM |
3307 | /* |
3308 | * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. | |
3309 | */ | |
3310 | #define MAX_ATIDS 8192U | |
3311 | ||
636f9d37 VP |
3312 | /* |
3313 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. | |
3314 | * | |
3315 | * If the firmware we're dealing with has Configuration File support, then | |
3316 | * we use that to perform all configuration | |
3317 | */ | |
3318 | ||
3319 | /* | |
3320 | * Tweak configuration based on module parameters, etc. Most of these have | |
3321 | * defaults assigned to them by Firmware Configuration Files (if we're using | |
3322 | * them) but need to be explicitly set if we're using hard-coded | |
3323 | * initialization. But even in the case of using Firmware Configuration | |
3324 | * Files, we'd like to expose the ability to change these via module | |
3325 | * parameters so these are essentially common tweaks/settings for | |
3326 | * Configuration Files and hard-coded initialization ... | |
3327 | */ | |
3328 | static int adap_init0_tweaks(struct adapter *adapter) | |
3329 | { | |
3330 | /* | |
3331 | * Fix up various Host-Dependent Parameters like Page Size, Cache | |
3332 | * Line Size, etc. The firmware default is for a 4KB Page Size and | |
3333 | * 64B Cache Line Size ... | |
3334 | */ | |
3335 | t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES); | |
3336 | ||
3337 | /* | |
3338 | * Process module parameters which affect early initialization. | |
3339 | */ | |
3340 | if (rx_dma_offset != 2 && rx_dma_offset != 0) { | |
3341 | dev_err(&adapter->pdev->dev, | |
3342 | "Ignoring illegal rx_dma_offset=%d, using 2\n", | |
3343 | rx_dma_offset); | |
3344 | rx_dma_offset = 2; | |
3345 | } | |
f612b815 HS |
3346 | t4_set_reg_field(adapter, SGE_CONTROL_A, |
3347 | PKTSHIFT_V(PKTSHIFT_M), | |
3348 | PKTSHIFT_V(rx_dma_offset)); | |
636f9d37 VP |
3349 | |
3350 | /* | |
3351 | * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux | |
3352 | * adds the pseudo header itself. | |
3353 | */ | |
837e4a42 HS |
3354 | t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A, |
3355 | CSUM_HAS_PSEUDO_HDR_F, 0); | |
636f9d37 VP |
3356 | |
3357 | return 0; | |
3358 | } | |
3359 | ||
01b69614 HS |
3360 | /* 10Gb/s-BT PHY Support. Chip-external 10Gb/s-BT PHYs are complex chips |
3361 | * unto themselves and contain their own firmware to perform their |
3362 | * tasks ... |
3363 | */ | |
3364 | static int phy_aq1202_version(const u8 *phy_fw_data, | |
3365 | size_t phy_fw_size) | |
3366 | { | |
3367 | int offset; | |
3368 | ||
3369 | /* At offset 0x8 is the primary image's starting offset, a | |
3370 | * field 3 bytes wide (stored in 4KB units, hence the shift below). | |
3371 | * | |
3372 | * At offset 0xa of the primary image is the offset of the DRAM | |
3373 | * segment, also 3 bytes wide. | |
3374 | * | |
3375 | * The FW version is at offset 0x27e of the DRAM segment and is | |
3376 | * 2 bytes wide. | |
3377 | */ | |
3378 | #define be16(__p) (((__p)[0] << 8) | (__p)[1]) | |
3379 | #define le16(__p) ((__p)[0] | ((__p)[1] << 8)) | |
3380 | #define le24(__p) (le16(__p) | ((__p)[2] << 16)) | |
3381 | ||
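/* Worked example (hypothetical image layout): if bytes 0x8..0xa hold
 * 02 00 00, le24() yields 0x000002 and the primary image starts at
 * 0x2 << 12 == 0x2000; le24() at 0x2000 + 0xa then gives the DRAM
 * segment offset, and be16() at that offset + 0x27e is the version
 * word returned below.
 */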
3382 | offset = le24(phy_fw_data + 0x8) << 12; | |
3383 | offset = le24(phy_fw_data + offset + 0xa); | |
3384 | return be16(phy_fw_data + offset + 0x27e); | |
3385 | ||
3386 | #undef be16 | |
3387 | #undef le16 | |
3388 | #undef le24 | |
3389 | } | |
3390 | ||
3391 | static struct info_10gbt_phy_fw { | |
3392 | unsigned int phy_fw_id; /* PCI Device ID */ | |
3393 | char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */ | |
3394 | int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size); | |
3395 | int phy_flash; /* Has FLASH for PHY Firmware */ | |
3396 | } phy_info_array[] = { | |
3397 | { | |
3398 | PHY_AQ1202_DEVICEID, | |
3399 | PHY_AQ1202_FIRMWARE, | |
3400 | phy_aq1202_version, | |
3401 | 1, | |
3402 | }, | |
3403 | { | |
3404 | PHY_BCM84834_DEVICEID, | |
3405 | PHY_BCM84834_FIRMWARE, | |
3406 | NULL, | |
3407 | 0, | |
3408 | }, | |
3409 | { 0, NULL, NULL }, | |
3410 | }; | |
3411 | ||
3412 | static struct info_10gbt_phy_fw *find_phy_info(int devid) | |
3413 | { | |
3414 | int i; | |
3415 | ||
3416 | for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) { | |
3417 | if (phy_info_array[i].phy_fw_id == devid) | |
3418 | return &phy_info_array[i]; | |
3419 | } | |
3420 | return NULL; | |
3421 | } | |
3422 | ||
3423 | /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to | |
3424 | * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error | |
3425 | * we return a negative error number. If we transfer new firmware we return 1 | |
3426 | * (from t4_load_phy_fw()). If we don't do anything we return 0. | |
3427 | */ | |
3428 | static int adap_init0_phy(struct adapter *adap) | |
3429 | { | |
3430 | const struct firmware *phyf; | |
3431 | int ret; | |
3432 | struct info_10gbt_phy_fw *phy_info; | |
3433 | ||
3434 | /* Use the device ID to determine which PHY file to flash. | |
3435 | */ | |
3436 | phy_info = find_phy_info(adap->pdev->device); | |
3437 | if (!phy_info) { | |
3438 | dev_warn(adap->pdev_dev, | |
3439 | "No PHY Firmware file found for this PHY\n"); | |
3440 | return -EOPNOTSUPP; | |
3441 | } | |
3442 | ||
3443 | /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then | |
3444 | * use that. The adapter firmware provides us with a memory buffer | |
3445 | * where we can load a PHY firmware file from the host if we want to | |
3446 | * override the PHY firmware File in flash. | |
3447 | */ | |
3448 | ret = request_firmware_direct(&phyf, phy_info->phy_fw_file, | |
3449 | adap->pdev_dev); | |
3450 | if (ret < 0) { | |
3451 | /* For adapters without FLASH attached to PHY for their | |
3452 | * firmware, it's obviously a fatal error if we can't get the | |
3453 | * firmware to the adapter. For adapters with PHY firmware | |
3454 | * FLASH storage, it's worth a warning if we can't find the | |
3455 | * PHY Firmware but we'll neuter the error ... | |
3456 | */ | |
3457 | dev_err(adap->pdev_dev, "unable to find PHY Firmware image " | |
3458 | "/lib/firmware/%s, error %d\n", | |
3459 | phy_info->phy_fw_file, -ret); | |
3460 | if (phy_info->phy_flash) { | |
3461 | int cur_phy_fw_ver = 0; | |
3462 | ||
3463 | t4_phy_fw_ver(adap, &cur_phy_fw_ver); | |
3464 | dev_warn(adap->pdev_dev, "continuing with on-adapter " |
3465 | "FLASH copy, version %#x\n", cur_phy_fw_ver); | |
3466 | ret = 0; | |
3467 | } | |
3468 | ||
3469 | return ret; | |
3470 | } | |
3471 | ||
3472 | /* Load PHY Firmware onto adapter. | |
3473 | */ | |
3474 | ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, | |
3475 | phy_info->phy_fw_version, | |
3476 | (u8 *)phyf->data, phyf->size); | |
3477 | if (ret < 0) | |
3478 | dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", | |
3479 | -ret); | |
3480 | else if (ret > 0) { | |
3481 | int new_phy_fw_ver = 0; | |
3482 | ||
3483 | if (phy_info->phy_fw_version) | |
3484 | new_phy_fw_ver = phy_info->phy_fw_version(phyf->data, | |
3485 | phyf->size); | |
3486 | dev_info(adap->pdev_dev, "Successfully transferred PHY " | |
3487 | "Firmware /lib/firmware/%s, version %#x\n", | |
3488 | phy_info->phy_fw_file, new_phy_fw_ver); | |
3489 | } | |
3490 | ||
3491 | release_firmware(phyf); | |
3492 | ||
3493 | return ret; | |
3494 | } | |
3495 | ||
636f9d37 VP |
3496 | /* |
3497 | * Attempt to initialize the adapter via a Firmware Configuration File. | |
3498 | */ | |
3499 | static int adap_init0_config(struct adapter *adapter, int reset) | |
3500 | { | |
3501 | struct fw_caps_config_cmd caps_cmd; | |
3502 | const struct firmware *cf; | |
3503 | unsigned long mtype = 0, maddr = 0; | |
3504 | u32 finiver, finicsum, cfcsum; | |
16e47624 HS |
3505 | int ret; |
3506 | int config_issued = 0; | |
0a57a536 | 3507 | char *fw_config_file, fw_config_file_path[256]; |
16e47624 | 3508 | char *config_name = NULL; |
636f9d37 VP |
3509 | |
3510 | /* | |
3511 | * Reset device if necessary. | |
3512 | */ | |
3513 | if (reset) { | |
3514 | ret = t4_fw_reset(adapter, adapter->mbox, | |
0d804338 | 3515 | PIORSTMODE_F | PIORST_F); |
636f9d37 VP |
3516 | if (ret < 0) |
3517 | goto bye; | |
3518 | } | |
3519 | ||
01b69614 HS |
3520 | /* If this is a 10Gb/s-BT adapter make sure the chip-external |
3521 | * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs | |
3522 | * to be performed after any global adapter RESET above since some | |
3523 | * PHYs only have local RAM copies of the PHY firmware. | |
3524 | */ | |
3525 | if (is_10gbt_device(adapter->pdev->device)) { | |
3526 | ret = adap_init0_phy(adapter); | |
3527 | if (ret < 0) | |
3528 | goto bye; | |
3529 | } | |
636f9d37 VP |
3530 | /* |
3531 | * If we have a T4 configuration file under /lib/firmware/cxgb4/, | |
3532 | * then use that. Otherwise, use the configuration file stored | |
3533 | * in the adapter flash ... | |
3534 | */ | |
d14807dd | 3535 | switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { |
0a57a536 | 3536 | case CHELSIO_T4: |
16e47624 | 3537 | fw_config_file = FW4_CFNAME; |
0a57a536 SR |
3538 | break; |
3539 | case CHELSIO_T5: | |
3540 | fw_config_file = FW5_CFNAME; | |
3541 | break; | |
3ccc6cf7 HS |
3542 | case CHELSIO_T6: |
3543 | fw_config_file = FW6_CFNAME; | |
3544 | break; | |
0a57a536 SR |
3545 | default: |
3546 | dev_err(adapter->pdev_dev, "Device %d is not supported\n", | |
3547 | adapter->pdev->device); | |
3548 | ret = -EINVAL; | |
3549 | goto bye; | |
3550 | } | |
3551 | ||
3552 | ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); | |
636f9d37 | 3553 | if (ret < 0) { |
16e47624 | 3554 | config_name = "On FLASH"; |
636f9d37 VP |
3555 | mtype = FW_MEMTYPE_CF_FLASH; |
3556 | maddr = t4_flash_cfg_addr(adapter); | |
3557 | } else { | |
3558 | u32 params[7], val[7]; | |
3559 | ||
16e47624 HS |
3560 | sprintf(fw_config_file_path, |
3561 | "/lib/firmware/%s", fw_config_file); | |
3562 | config_name = fw_config_file_path; | |
3563 | ||
636f9d37 VP |
3564 | if (cf->size >= FLASH_CFG_MAX_SIZE) |
3565 | ret = -ENOMEM; | |
3566 | else { | |
5167865a HS |
3567 | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
3568 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); | |
636f9d37 | 3569 | ret = t4_query_params(adapter, adapter->mbox, |
b2612722 | 3570 | adapter->pf, 0, 1, params, val); |
636f9d37 VP |
3571 | if (ret == 0) { |
3572 | /* | |
fc5ab020 | 3573 | * For t4_memory_rw() below addresses and |
636f9d37 VP |
3574 | * sizes have to be in terms of multiples of 4 |
3575 | * bytes. So, if the Configuration File isn't | |
3576 | * a multiple of 4 bytes in length we'll have | |
3577 | * to write that out separately since we can't | |
3578 | * guarantee that the bytes following the | |
3579 | * residual byte in the buffer returned by | |
3580 | * request_firmware() are zeroed out ... | |
3581 | */ | |
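/* Worked example: a 103-byte file gives size == 100 and resid == 3;
 * the first 100 bytes go out in one bulk write, then a final 4-byte
 * word is built from bytes 100..102 with its last byte zeroed and
 * written at maddr + 100.
 */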
3582 | size_t resid = cf->size & 0x3; | |
3583 | size_t size = cf->size & ~0x3; | |
3584 | __be32 *data = (__be32 *)cf->data; | |
3585 | ||
5167865a HS |
3586 | mtype = FW_PARAMS_PARAM_Y_G(val[0]); |
3587 | maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16; | |
636f9d37 | 3588 | |
fc5ab020 HS |
3589 | spin_lock(&adapter->win0_lock); |
3590 | ret = t4_memory_rw(adapter, 0, mtype, maddr, | |
3591 | size, data, T4_MEMORY_WRITE); | |
636f9d37 VP |
3592 | if (ret == 0 && resid != 0) { |
3593 | union { | |
3594 | __be32 word; | |
3595 | char buf[4]; | |
3596 | } last; | |
3597 | int i; | |
3598 | ||
3599 | last.word = data[size >> 2]; | |
3600 | for (i = resid; i < 4; i++) | |
3601 | last.buf[i] = 0; | |
fc5ab020 HS |
3602 | ret = t4_memory_rw(adapter, 0, mtype, |
3603 | maddr + size, | |
3604 | 4, &last.word, | |
3605 | T4_MEMORY_WRITE); | |
636f9d37 | 3606 | } |
fc5ab020 | 3607 | spin_unlock(&adapter->win0_lock); |
636f9d37 VP |
3608 | } |
3609 | } | |
3610 | ||
3611 | release_firmware(cf); | |
3612 | if (ret) | |
3613 | goto bye; | |
3614 | } | |
3615 | ||
3616 | /* | |
3617 | * Issue a Capability Configuration command to the firmware to get it | |
3618 | * to parse the Configuration File. We don't use t4_fw_config_file() | |
3619 | * because we want the ability to modify various features after we've | |
3620 | * processed the configuration file ... | |
3621 | */ | |
3622 | memset(&caps_cmd, 0, sizeof(caps_cmd)); | |
3623 | caps_cmd.op_to_write = | |
e2ac9628 HS |
3624 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
3625 | FW_CMD_REQUEST_F | | |
3626 | FW_CMD_READ_F); | |
ce91a923 | 3627 | caps_cmd.cfvalid_to_len16 = |
5167865a HS |
3628 | htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | |
3629 | FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | | |
3630 | FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | | |
636f9d37 VP |
3631 | FW_LEN16(caps_cmd)); |
3632 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), | |
3633 | &caps_cmd); | |
16e47624 HS |
3634 | |
3635 | /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware | |
3636 | * Configuration File in FLASH), our last gasp effort is to use the | |
3637 | * Firmware Configuration File which is embedded in the firmware. A | |
3638 | * very few early versions of the firmware didn't have one embedded | |
3639 | * but we can ignore those. | |
3640 | */ | |
3641 | if (ret == -ENOENT) { | |
3642 | memset(&caps_cmd, 0, sizeof(caps_cmd)); | |
3643 | caps_cmd.op_to_write = | |
e2ac9628 HS |
3644 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
3645 | FW_CMD_REQUEST_F | | |
3646 | FW_CMD_READ_F); | |
16e47624 HS |
3647 | caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); |
3648 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, | |
3649 | sizeof(caps_cmd), &caps_cmd); | |
3650 | config_name = "Firmware Default"; | |
3651 | } | |
3652 | ||
3653 | config_issued = 1; | |
636f9d37 VP |
3654 | if (ret < 0) |
3655 | goto bye; | |
3656 | ||
3657 | finiver = ntohl(caps_cmd.finiver); | |
3658 | finicsum = ntohl(caps_cmd.finicsum); | |
3659 | cfcsum = ntohl(caps_cmd.cfcsum); | |
3660 | if (finicsum != cfcsum) | |
3661 | dev_warn(adapter->pdev_dev, "Configuration File checksum "\ | |
3662 | "mismatch: [fini] csum=%#x, computed csum=%#x\n", | |
3663 | finicsum, cfcsum); | |
3664 | ||
636f9d37 VP |
3665 | /* |
3666 | * And now tell the firmware to use the configuration we just loaded. | |
3667 | */ | |
3668 | caps_cmd.op_to_write = | |
e2ac9628 HS |
3669 | htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
3670 | FW_CMD_REQUEST_F | | |
3671 | FW_CMD_WRITE_F); | |
ce91a923 | 3672 | caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); |
636f9d37 VP |
3673 | ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), |
3674 | NULL); | |
3675 | if (ret < 0) | |
3676 | goto bye; | |
3677 | ||
3678 | /* | |
3679 | * Tweak configuration based on system architecture, module | |
3680 | * parameters, etc. | |
3681 | */ | |
3682 | ret = adap_init0_tweaks(adapter); | |
3683 | if (ret < 0) | |
3684 | goto bye; | |
3685 | ||
3686 | /* | |
3687 | * And finally tell the firmware to initialize itself using the | |
3688 | * parameters from the Configuration File. | |
3689 | */ | |
3690 | ret = t4_fw_initialize(adapter, adapter->mbox); | |
3691 | if (ret < 0) | |
3692 | goto bye; | |
3693 | ||
06640310 HS |
3694 | /* Emit Firmware Configuration File information and return |
3695 | * successfully. | |
636f9d37 | 3696 | */ |
636f9d37 | 3697 | dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ |
16e47624 HS |
3698 | "Configuration File \"%s\", version %#x, computed checksum %#x\n", |
3699 | config_name, finiver, cfcsum); | |
636f9d37 VP |
3700 | return 0; |
3701 | ||
3702 | /* | |
3703 | * Something bad happened. Return the error ... (If the "error" | |
3704 | * is that there's no Configuration File on the adapter we don't | |
3705 | * want to issue a warning since this is fairly common.) | |
3706 | */ | |
3707 | bye: | |
16e47624 HS |
3708 | if (config_issued && ret != -ENOENT) |
3709 | dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", | |
3710 | config_name, -ret); | |
636f9d37 VP |
3711 | return ret; |
3712 | } | |
3713 | ||
16e47624 HS |
3714 | static struct fw_info fw_info_array[] = { |
3715 | { | |
3716 | .chip = CHELSIO_T4, | |
3717 | .fs_name = FW4_CFNAME, | |
3718 | .fw_mod_name = FW4_FNAME, | |
3719 | .fw_hdr = { | |
3720 | .chip = FW_HDR_CHIP_T4, | |
3721 | .fw_ver = __cpu_to_be32(FW_VERSION(T4)), | |
3722 | .intfver_nic = FW_INTFVER(T4, NIC), | |
3723 | .intfver_vnic = FW_INTFVER(T4, VNIC), | |
3724 | .intfver_ri = FW_INTFVER(T4, RI), | |
3725 | .intfver_iscsi = FW_INTFVER(T4, ISCSI), | |
3726 | .intfver_fcoe = FW_INTFVER(T4, FCOE), | |
3727 | }, | |
3728 | }, { | |
3729 | .chip = CHELSIO_T5, | |
3730 | .fs_name = FW5_CFNAME, | |
3731 | .fw_mod_name = FW5_FNAME, | |
3732 | .fw_hdr = { | |
3733 | .chip = FW_HDR_CHIP_T5, | |
3734 | .fw_ver = __cpu_to_be32(FW_VERSION(T5)), | |
3735 | .intfver_nic = FW_INTFVER(T5, NIC), | |
3736 | .intfver_vnic = FW_INTFVER(T5, VNIC), | |
3737 | .intfver_ri = FW_INTFVER(T5, RI), | |
3738 | .intfver_iscsi = FW_INTFVER(T5, ISCSI), | |
3739 | .intfver_fcoe = FW_INTFVER(T5, FCOE), | |
3740 | }, | |
3ccc6cf7 HS |
3741 | }, { |
3742 | .chip = CHELSIO_T6, | |
3743 | .fs_name = FW6_CFNAME, | |
3744 | .fw_mod_name = FW6_FNAME, | |
3745 | .fw_hdr = { | |
3746 | .chip = FW_HDR_CHIP_T6, | |
3747 | .fw_ver = __cpu_to_be32(FW_VERSION(T6)), | |
3748 | .intfver_nic = FW_INTFVER(T6, NIC), | |
3749 | .intfver_vnic = FW_INTFVER(T6, VNIC), | |
3750 | .intfver_ofld = FW_INTFVER(T6, OFLD), | |
3751 | .intfver_ri = FW_INTFVER(T6, RI), | |
3752 | .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), | |
3753 | .intfver_iscsi = FW_INTFVER(T6, ISCSI), | |
3754 | .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), | |
3755 | .intfver_fcoe = FW_INTFVER(T6, FCOE), | |
3756 | }, | |
16e47624 | 3757 | } |
3ccc6cf7 | 3758 | |
16e47624 HS |
3759 | }; |
3760 | ||
3761 | static struct fw_info *find_fw_info(int chip) | |
3762 | { | |
3763 | int i; | |
3764 | ||
3765 | for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { | |
3766 | if (fw_info_array[i].chip == chip) | |
3767 | return &fw_info_array[i]; | |
3768 | } | |
3769 | return NULL; | |
3770 | } | |
3771 | ||
b8ff05a9 DM |
3772 | /* |
3773 | * Phase 0 of initialization: contact FW, obtain config, perform basic init. | |
3774 | */ | |
3775 | static int adap_init0(struct adapter *adap) | |
3776 | { | |
3777 | int ret; | |
3778 | u32 v, port_vec; | |
3779 | enum dev_state state; | |
3780 | u32 params[7], val[7]; | |
9a4da2cd | 3781 | struct fw_caps_config_cmd caps_cmd; |
dcf7b6f5 | 3782 | int reset = 1; |
b8ff05a9 | 3783 | |
ae469b68 HS |
3784 | /* Grab Firmware Device Log parameters as early as possible so we have |
3785 | * access to it for debugging, etc. | |
3786 | */ | |
3787 | ret = t4_init_devlog_params(adap); | |
3788 | if (ret < 0) | |
3789 | return ret; | |
3790 | ||
666224d4 | 3791 | /* Contact FW, advertising Master capability */ |
c5a8c0f3 HS |
3792 | ret = t4_fw_hello(adap, adap->mbox, adap->mbox, |
3793 | is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state); | |
b8ff05a9 DM |
3794 | if (ret < 0) { |
3795 | dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", | |
3796 | ret); | |
3797 | return ret; | |
3798 | } | |
636f9d37 VP |
3799 | if (ret == adap->mbox) |
3800 | adap->flags |= MASTER_PF; | |
b8ff05a9 | 3801 | |
636f9d37 VP |
3802 | /* |
3803 | * If we're the Master PF Driver and the device is uninitialized, | |
3804 | * then let's consider upgrading the firmware ... (We always want | |
3805 | * to check the firmware version number in order to A. get it for | |
3806 | * later reporting and B. to warn if the currently loaded firmware | |
3807 | * is excessively mismatched relative to the driver.) | |
3808 | */ | |
0de72738 | 3809 | |
760446f9 | 3810 | t4_get_version_info(adap); |
a69265e9 HS |
3811 | ret = t4_check_fw_version(adap); |
3812 | /* If firmware is too old (not supported by driver) force an update. */ | |
21d11bd6 | 3813 | if (ret) |
a69265e9 | 3814 | state = DEV_STATE_UNINIT; |
636f9d37 | 3815 | if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { |
16e47624 HS |
3816 | struct fw_info *fw_info; |
3817 | struct fw_hdr *card_fw; | |
3818 | const struct firmware *fw; | |
3819 | const u8 *fw_data = NULL; | |
3820 | unsigned int fw_size = 0; | |
3821 | ||
3822 | /* This is the firmware whose headers the driver was compiled | |
3823 | * against | |
3824 | */ | |
3825 | fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); | |
3826 | if (fw_info == NULL) { | |
3827 | dev_err(adap->pdev_dev, | |
3828 | "unable to get firmware info for chip %d.\n", | |
3829 | CHELSIO_CHIP_VERSION(adap->params.chip)); | |
3830 | return -EINVAL; | |
636f9d37 | 3831 | } |
16e47624 HS |
3832 | |
3833 | /* allocate memory to read the header of the firmware on the | |
3834 | * card | |
3835 | */ | |
752ade68 | 3836 | card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL); |
16e47624 HS |
3837 | |
3838 | /* Get FW from /lib/firmware/ */ | |
3839 | ret = request_firmware(&fw, fw_info->fw_mod_name, | |
3840 | adap->pdev_dev); | |
3841 | if (ret < 0) { | |
3842 | dev_err(adap->pdev_dev, | |
3843 | "unable to load firmware image %s, error %d\n", | |
3844 | fw_info->fw_mod_name, ret); | |
3845 | } else { | |
3846 | fw_data = fw->data; | |
3847 | fw_size = fw->size; | |
3848 | } | |
3849 | ||
3850 | /* upgrade FW logic */ | |
3851 | ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, | |
3852 | state, &reset); | |
3853 | ||
3854 | /* Cleaning up */ | |
0b5b6bee | 3855 | release_firmware(fw); |
752ade68 | 3856 | kvfree(card_fw); |
16e47624 | 3857 | |
636f9d37 | 3858 | if (ret < 0) |
16e47624 | 3859 | goto bye; |
636f9d37 | 3860 | } |
b8ff05a9 | 3861 | |
636f9d37 VP |
3862 | /* |
3863 | * Grab VPD parameters. This should be done after we establish a | |
3864 | * connection to the firmware since some of the VPD parameters | |
3865 | * (notably the Core Clock frequency) are retrieved via requests to | |
3866 | * the firmware. On the other hand, we need these fairly early on | |
3867 | * so we do this right after getting ahold of the firmware. | |
3868 | */ | |
098ef6c2 | 3869 | ret = t4_get_vpd_params(adap, &adap->params.vpd); |
a0881cab DM |
3870 | if (ret < 0) |
3871 | goto bye; | |
a0881cab | 3872 | |
636f9d37 | 3873 | /* |
13ee15d3 VP |
3874 | * Find out what ports are available to us. Note that we need to do |
3875 | * this before calling adap_init0_config() since it needs nports |
3876 | * and portvec ... | |
636f9d37 VP |
3877 | */ |
3878 | v = | |
5167865a HS |
3879 | FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
3880 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); | |
b2612722 | 3881 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); |
a0881cab DM |
3882 | if (ret < 0) |
3883 | goto bye; | |
3884 | ||
636f9d37 VP |
3885 | adap->params.nports = hweight32(port_vec); |
3886 | adap->params.portvec = port_vec; | |
3887 | ||
06640310 HS |
3888 | /* If the firmware is initialized already, emit a simple note to that |
3889 | * effect. Otherwise, it's time to try initializing the adapter. | |
636f9d37 VP |
3890 | */ |
3891 | if (state == DEV_STATE_INIT) { | |
3892 | dev_info(adap->pdev_dev, "Coming up as %s: "\ | |
3893 | "Adapter already initialized\n", | |
3894 | adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); | |
636f9d37 VP |
3895 | } else { |
3896 | dev_info(adap->pdev_dev, "Coming up as MASTER: "\ | |
3897 | "Initializing adapter\n"); | |
06640310 HS |
3898 | |
3899 | /* Find out whether we're dealing with a version of the | |
3900 | * firmware which has configuration file support. | |
636f9d37 | 3901 | */ |
06640310 HS |
3902 | params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | |
3903 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); | |
b2612722 | 3904 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, |
06640310 | 3905 | params, val); |
13ee15d3 | 3906 | |
06640310 HS |
3907 | /* If the firmware doesn't support Configuration Files, |
3908 | * return an error. | |
3909 | */ | |
3910 | if (ret < 0) { | |
3911 | dev_err(adap->pdev_dev, "firmware doesn't support " | |
3912 | "Firmware Configuration Files\n"); | |
3913 | goto bye; | |
3914 | } | |
3915 | ||
3916 | /* The firmware provides us with a memory buffer where we can | |
3917 | * load a Configuration File from the host if we want to | |
3918 | * override the Configuration File in flash. | |
3919 | */ | |
3920 | ret = adap_init0_config(adap, reset); | |
3921 | if (ret == -ENOENT) { | |
3922 | dev_err(adap->pdev_dev, "no Configuration File " | |
3923 | "present on adapter.\n"); | |
3924 | goto bye; | |
636f9d37 VP |
3925 | } |
3926 | if (ret < 0) { | |
06640310 HS |
3927 | dev_err(adap->pdev_dev, "could not initialize " |
3928 | "adapter, error %d\n", -ret); | |
636f9d37 VP |
3929 | goto bye; |
3930 | } | |
3931 | } | |
3932 | ||
06640310 HS |
3933 | /* Give the SGE code a chance to pull in anything that it needs ... |
3934 | * Note that this must be called after we retrieve our VPD parameters | |
3935 | * in order to know how to convert core ticks to seconds, etc. | |
636f9d37 | 3936 | */ |
06640310 HS |
3937 | ret = t4_sge_init(adap); |
3938 | if (ret < 0) | |
3939 | goto bye; | |
636f9d37 | 3940 | |
9a4da2cd VP |
3941 | if (is_bypass_device(adap->pdev->device)) |
3942 | adap->params.bypass = 1; | |
3943 | ||
636f9d37 VP |
3944 | /* |
3945 | * Grab some of our basic fundamental operating parameters. | |
3946 | */ | |
3947 | #define FW_PARAM_DEV(param) \ | |
5167865a HS |
3948 | (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ |
3949 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) | |
636f9d37 | 3950 | |
b8ff05a9 | 3951 | #define FW_PARAM_PFVF(param) \ |
5167865a HS |
3952 | FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ |
3953 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ | |
3954 | FW_PARAMS_PARAM_Y_V(0) | \ | |
3955 | FW_PARAMS_PARAM_Z_V(0) | |
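/* e.g. FW_PARAM_PFVF(L2T_START) builds a 32-bit parameter ID with
 * mnemonic PFVF, X == FW_PARAMS_PARAM_PFVF_L2T_START and Y == Z == 0;
 * these IDs are what the t4_query_params() calls below hand to the
 * firmware.
 */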
b8ff05a9 | 3956 | |
636f9d37 | 3957 | params[0] = FW_PARAM_PFVF(EQ_START); |
b8ff05a9 DM |
3958 | params[1] = FW_PARAM_PFVF(L2T_START); |
3959 | params[2] = FW_PARAM_PFVF(L2T_END); | |
3960 | params[3] = FW_PARAM_PFVF(FILTER_START); | |
3961 | params[4] = FW_PARAM_PFVF(FILTER_END); | |
e46dab4d | 3962 | params[5] = FW_PARAM_PFVF(IQFLINT_START); |
b2612722 | 3963 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); |
b8ff05a9 DM |
3964 | if (ret < 0) |
3965 | goto bye; | |
636f9d37 VP |
3966 | adap->sge.egr_start = val[0]; |
3967 | adap->l2t_start = val[1]; | |
3968 | adap->l2t_end = val[2]; | |
b8ff05a9 DM |
3969 | adap->tids.ftid_base = val[3]; |
3970 | adap->tids.nftids = val[4] - val[3] + 1; | |
e46dab4d | 3971 | adap->sge.ingr_start = val[5]; |
b8ff05a9 | 3972 | |
4b8e27a8 HS |
3973 | /* qids (ingress/egress) returned from firmware can be anywhere |
3974 | * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. | |
3975 | * Hence the driver needs to allocate memory for this range to |
3976 | * store the queue info. Get the highest IQFLINT/EQ index returned | |
3977 | * in FW_EQ_*_CMD.alloc command. | |
3978 | */ | |
3979 | params[0] = FW_PARAM_PFVF(EQ_END); | |
3980 | params[1] = FW_PARAM_PFVF(IQFLINT_END); | |
b2612722 | 3981 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); |
4b8e27a8 HS |
3982 | if (ret < 0) |
3983 | goto bye; | |
3984 | adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; | |
3985 | adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; | |
3986 | ||
3987 | adap->sge.egr_map = kcalloc(adap->sge.egr_sz, | |
3988 | sizeof(*adap->sge.egr_map), GFP_KERNEL); | |
3989 | if (!adap->sge.egr_map) { | |
3990 | ret = -ENOMEM; | |
3991 | goto bye; | |
3992 | } | |
3993 | ||
3994 | adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, | |
3995 | sizeof(*adap->sge.ingr_map), GFP_KERNEL); | |
3996 | if (!adap->sge.ingr_map) { | |
3997 | ret = -ENOMEM; | |
3998 | goto bye; | |
3999 | } | |
4000 | ||
4001 | /* Allocate the memory for the various egress queue bitmaps, | |
5b377d11 | 4002 | * i.e. starving_fl, txq_maperr and blocked_fl. |
4b8e27a8 HS |
4003 | */ |
4004 | adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | |
4005 | sizeof(long), GFP_KERNEL); | |
4006 | if (!adap->sge.starving_fl) { | |
4007 | ret = -ENOMEM; | |
4008 | goto bye; | |
4009 | } | |
4010 | ||
4011 | adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | |
4012 | sizeof(long), GFP_KERNEL); | |
4013 | if (!adap->sge.txq_maperr) { | |
4014 | ret = -ENOMEM; | |
4015 | goto bye; | |
4016 | } | |
4017 | ||
5b377d11 HS |
4018 | #ifdef CONFIG_DEBUG_FS |
4019 | adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | |
4020 | sizeof(long), GFP_KERNEL); | |
4021 | if (!adap->sge.blocked_fl) { | |
4022 | ret = -ENOMEM; | |
4023 | goto bye; | |
4024 | } | |
4025 | #endif | |
4026 | ||
b5a02f50 AB |
4027 | params[0] = FW_PARAM_PFVF(CLIP_START); |
4028 | params[1] = FW_PARAM_PFVF(CLIP_END); | |
b2612722 | 4029 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); |
b5a02f50 AB |
4030 | if (ret < 0) |
4031 | goto bye; | |
4032 | adap->clipt_start = val[0]; | |
4033 | adap->clipt_end = val[1]; | |
4034 | ||
b72a32da RL |
4035 | /* We don't yet have a PARAMS call to retrieve the number of Traffic |
4036 | * Classes supported by the hardware/firmware so we hard code it here | |
4037 | * for now. | |
4038 | */ | |
4039 | adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; | |
4040 | ||
636f9d37 VP |
4041 | /* query params related to active filter region */ |
4042 | params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); | |
4043 | params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); | |
b2612722 | 4044 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); |
636f9d37 VP |
4045 | /* If the active filter region is non-empty, we enable establishing |
4046 | * offload connections through firmware work requests. |
4047 | */ | |
4048 | if ((val[0] != val[1]) && (ret >= 0)) { | |
4049 | adap->flags |= FW_OFLD_CONN; | |
4050 | adap->tids.aftid_base = val[0]; | |
4051 | adap->tids.aftid_end = val[1]; | |
4052 | } | |
4053 | ||
b407a4a9 VP |
4054 | /* If we're running on newer firmware, let it know that we're |
4055 | * prepared to deal with encapsulated CPL messages. Older | |
4056 | * firmware won't understand this and we'll just get | |
4057 | * unencapsulated messages ... | |
4058 | */ | |
4059 | params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); | |
4060 | val[0] = 1; | |
b2612722 | 4061 | (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); |
b407a4a9 | 4062 | |
1ac0f095 KS |
4063 | /* |
4064 | * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL | |
4065 | * capability. Earlier versions of the firmware didn't have the | |
4066 | * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no | |
4067 | * permission to use ULPTX MEMWRITE DSGL. | |
4068 | */ | |
4069 | if (is_t4(adap->params.chip)) { | |
4070 | adap->params.ulptx_memwrite_dsgl = false; | |
4071 | } else { | |
4072 | params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); | |
b2612722 | 4073 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, |
1ac0f095 KS |
4074 | 1, params, val); |
4075 | adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); | |
4076 | } | |
4077 | ||
086de575 SW |
4078 | /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */ |
4079 | params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR); | |
4080 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, | |
4081 | 1, params, val); | |
4082 | adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); | |
4083 | ||
0ff90994 KS |
4084 | /* See if FW supports FW_FILTER2 work request */ |
4085 | if (is_t4(adap->params.chip)) { | |
4086 | adap->params.filter2_wr_support = 0; | |
4087 | } else { | |
4088 | params[0] = FW_PARAM_DEV(FILTER2_WR); | |
4089 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, | |
4090 | 1, params, val); | |
4091 | adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); | |
4092 | } | |
4093 | ||
636f9d37 VP |
4094 | /* |
4095 | * Get device capabilities so we can determine what resources we need | |
4096 | * to manage. | |
4097 | */ | |
4098 | memset(&caps_cmd, 0, sizeof(caps_cmd)); | |
e2ac9628 HS |
4099 | caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | |
4100 | FW_CMD_REQUEST_F | FW_CMD_READ_F); | |
ce91a923 | 4101 | caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); |
636f9d37 VP |
4102 | ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), |
4103 | &caps_cmd); | |
4104 | if (ret < 0) | |
4105 | goto bye; | |
4106 | ||
5c31254e KS |
4107 | if (caps_cmd.ofldcaps || |
4108 | (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) { | |
b8ff05a9 DM |
4109 | /* query offload-related parameters */ |
4110 | params[0] = FW_PARAM_DEV(NTID); | |
4111 | params[1] = FW_PARAM_PFVF(SERVER_START); | |
4112 | params[2] = FW_PARAM_PFVF(SERVER_END); | |
4113 | params[3] = FW_PARAM_PFVF(TDDP_START); | |
4114 | params[4] = FW_PARAM_PFVF(TDDP_END); | |
4115 | params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); | |
b2612722 | 4116 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, |
636f9d37 | 4117 | params, val); |
b8ff05a9 DM |
4118 | if (ret < 0) |
4119 | goto bye; | |
4120 | adap->tids.ntids = val[0]; | |
4121 | adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); | |
4122 | adap->tids.stid_base = val[1]; | |
4123 | adap->tids.nstids = val[2] - val[1] + 1; | |
636f9d37 | 4124 | /* |
dbedd44e | 4125 | * Setup server filter region. Divide the available filter |
636f9d37 VP |
4126 | * region into two parts. Regular filters get 1/3rd and server |
4127 | * filters get the remaining 2/3rd. This is only enabled if the |
4128 | * workaround path is enabled. |
4129 | * 1. For regular filters. |
4130 | * 2. Server filters: these are special filters which are used |
4131 | * to redirect SYN packets to the offload queue. |
4132 | */ | |
4133 | if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { | |
4134 | adap->tids.sftid_base = adap->tids.ftid_base + | |
4135 | DIV_ROUND_UP(adap->tids.nftids, 3); | |
4136 | adap->tids.nsftids = adap->tids.nftids - | |
4137 | DIV_ROUND_UP(adap->tids.nftids, 3); | |
4138 | adap->tids.nftids = adap->tids.sftid_base - | |
4139 | adap->tids.ftid_base; | |
4140 | } | |
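/* e.g. with nftids == 3072 this leaves 1024 regular filter IDs and
 * carves out 2048 server filter IDs.
 */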
b8ff05a9 DM |
4141 | adap->vres.ddp.start = val[3]; |
4142 | adap->vres.ddp.size = val[4] - val[3] + 1; | |
4143 | adap->params.ofldq_wr_cred = val[5]; | |
636f9d37 | 4144 | |
5c31254e KS |
4145 | if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) { |
4146 | if (init_hash_filter(adap) < 0) | |
4147 | goto bye; | |
4148 | } else { | |
4149 | adap->params.offload = 1; | |
4150 | adap->num_ofld_uld += 1; | |
4151 | } | |
b8ff05a9 | 4152 | } |
636f9d37 | 4153 | if (caps_cmd.rdmacaps) { |
b8ff05a9 DM |
4154 | params[0] = FW_PARAM_PFVF(STAG_START); |
4155 | params[1] = FW_PARAM_PFVF(STAG_END); | |
4156 | params[2] = FW_PARAM_PFVF(RQ_START); | |
4157 | params[3] = FW_PARAM_PFVF(RQ_END); | |
4158 | params[4] = FW_PARAM_PFVF(PBL_START); | |
4159 | params[5] = FW_PARAM_PFVF(PBL_END); | |
b2612722 | 4160 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, |
636f9d37 | 4161 | params, val); |
b8ff05a9 DM |
4162 | if (ret < 0) |
4163 | goto bye; | |
4164 | adap->vres.stag.start = val[0]; | |
4165 | adap->vres.stag.size = val[1] - val[0] + 1; | |
4166 | adap->vres.rq.start = val[2]; | |
4167 | adap->vres.rq.size = val[3] - val[2] + 1; | |
4168 | adap->vres.pbl.start = val[4]; | |
4169 | adap->vres.pbl.size = val[5] - val[4] + 1; | |
a0881cab DM |
4170 | |
4171 | params[0] = FW_PARAM_PFVF(SQRQ_START); | |
4172 | params[1] = FW_PARAM_PFVF(SQRQ_END); | |
4173 | params[2] = FW_PARAM_PFVF(CQ_START); | |
4174 | params[3] = FW_PARAM_PFVF(CQ_END); | |
1ae970e0 DM |
4175 | params[4] = FW_PARAM_PFVF(OCQ_START); |
4176 | params[5] = FW_PARAM_PFVF(OCQ_END); | |
b2612722 | 4177 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, |
5c937dd3 | 4178 | val); |
a0881cab DM |
4179 | if (ret < 0) |
4180 | goto bye; | |
4181 | adap->vres.qp.start = val[0]; | |
4182 | adap->vres.qp.size = val[1] - val[0] + 1; | |
4183 | adap->vres.cq.start = val[2]; | |
4184 | adap->vres.cq.size = val[3] - val[2] + 1; | |
1ae970e0 DM |
4185 | adap->vres.ocq.start = val[4]; |
4186 | adap->vres.ocq.size = val[5] - val[4] + 1; | |
4c2c5763 HS |
4187 | |
4188 | params[0] = FW_PARAM_DEV(MAXORDIRD_QP); | |
4189 | params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); | |
b2612722 | 4190 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, |
5c937dd3 | 4191 | val); |
4c2c5763 HS |
4192 | if (ret < 0) { |
4193 | adap->params.max_ordird_qp = 8; | |
4194 | adap->params.max_ird_adapter = 32 * adap->tids.ntids; | |
4195 | ret = 0; | |
4196 | } else { | |
4197 | adap->params.max_ordird_qp = val[0]; | |
4198 | adap->params.max_ird_adapter = val[1]; | |
4199 | } | |
4200 | dev_info(adap->pdev_dev, | |
4201 | "max_ordird_qp %d max_ird_adapter %d\n", | |
4202 | adap->params.max_ordird_qp, | |
4203 | adap->params.max_ird_adapter); | |
0fbc81b3 | 4204 | adap->num_ofld_uld += 2; |
b8ff05a9 | 4205 | } |
636f9d37 | 4206 | if (caps_cmd.iscsicaps) { |
b8ff05a9 DM |
4207 | params[0] = FW_PARAM_PFVF(ISCSI_START); |
4208 | params[1] = FW_PARAM_PFVF(ISCSI_END); | |
b2612722 | 4209 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, |
636f9d37 | 4210 | params, val); |
b8ff05a9 DM |
4211 | if (ret < 0) |
4212 | goto bye; | |
4213 | adap->vres.iscsi.start = val[0]; | |
4214 | adap->vres.iscsi.size = val[1] - val[0] + 1; | |
0fbc81b3 HS |
4215 | /* LIO target and cxgb4i initiator */ |
4216 | adap->num_ofld_uld += 2; | |
b8ff05a9 | 4217 | } |
94cdb8bb HS |
4218 | if (caps_cmd.cryptocaps) { |
4219 | /* Should query params here...TODO */ | |
72a56ca9 HJ |
4220 | params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE); |
4221 | ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, | |
4222 | params, val); | |
4223 | if (ret < 0) { | |
4224 | if (ret != -EINVAL) | |
4225 | goto bye; | |
4226 | } else { | |
4227 | adap->vres.ncrypto_fc = val[0]; | |
4228 | } | |
94cdb8bb HS |
4229 | adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; |
4230 | adap->num_uld += 1; | |
4231 | } | |
b8ff05a9 DM |
4232 | #undef FW_PARAM_PFVF |
4233 | #undef FW_PARAM_DEV | |
4234 | ||
92e7ae71 HS |
4235 | /* The MTU/MSS Table is initialized by now, so load its values. If |
4236 | * we're initializing the adapter, then we'll make any modifications | |
4237 | * we want to the MTU/MSS Table and also initialize the congestion | |
4238 | * parameters. | |
636f9d37 | 4239 | */ |
b8ff05a9 | 4240 | t4_read_mtu_tbl(adap, adap->params.mtus, NULL); |
92e7ae71 HS |
4241 | if (state != DEV_STATE_INIT) { |
4242 | int i; | |
4243 | ||
4244 | /* The default MTU Table contains values 1492 and 1500. | |
4245 | * However, for TCP, it's better to have two values which are | |
4246 | * a multiple of 8 +/- 4 bytes apart near this popular MTU. | |
4247 | * This allows us to have a TCP Data Payload which is a | |
4248 | * multiple of 8 regardless of what combination of TCP Options | |
4249 | * are in use (always a multiple of 4 bytes) which is | |
4250 | * important for performance reasons. For instance, if no | |
4251 | * options are in use, then we have a 20-byte IP header and a | |
4252 | * 20-byte TCP header. In this case, a 1500-byte MSS would | |
4253 | * result in a TCP Data Payload of 1500 - 40 == 1460 bytes | |
4254 | * which is not a multiple of 8. So using an MSS of 1488 in | |
4255 | * this case results in a TCP Data Payload of 1448 bytes which | |
4256 | * is a multiple of 8. On the other hand, if 12-byte TCP Time | |
4257 | * Stamps have been negotiated, then an MTU of 1500 bytes | |
4258 | * results in a TCP Data Payload of 1448 bytes which, as | |
4259 | * above, is a multiple of 8 bytes ... | |
4260 | */ | |
4261 | for (i = 0; i < NMTUS; i++) | |
4262 | if (adap->params.mtus[i] == 1492) { | |
4263 | adap->params.mtus[i] = 1488; | |
4264 | break; | |
4265 | } | |
7ee9ff94 | 4266 | |
92e7ae71 HS |
4267 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, |
4268 | adap->params.b_wnd); | |
4269 | } | |
df64e4d3 | 4270 | t4_init_sge_params(adap); |
636f9d37 | 4271 | adap->flags |= FW_OK; |
5ccf9d04 | 4272 | t4_init_tp_params(adap, true); |
b8ff05a9 DM |
4273 | return 0; |
4274 | ||
4275 | /* | |
636f9d37 VP |
4276 | * Something bad happened. If a command timed out or failed with EIO, |
4277 | * the FW is not operating within its spec or something catastrophic |
4278 | * happened to the HW/FW; stop issuing commands. |
b8ff05a9 | 4279 | */ |
636f9d37 | 4280 | bye: |
4b8e27a8 HS |
4281 | kfree(adap->sge.egr_map); |
4282 | kfree(adap->sge.ingr_map); | |
4283 | kfree(adap->sge.starving_fl); | |
4284 | kfree(adap->sge.txq_maperr); | |
5b377d11 HS |
4285 | #ifdef CONFIG_DEBUG_FS |
4286 | kfree(adap->sge.blocked_fl); | |
4287 | #endif | |
636f9d37 VP |
4288 | if (ret != -ETIMEDOUT && ret != -EIO) |
4289 | t4_fw_bye(adap, adap->mbox); | |
b8ff05a9 DM |
4290 | return ret; |
4291 | } | |
4292 | ||
204dc3c0 DM |
4293 | /* EEH callbacks */ |
4294 | ||
4295 | static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, | |
4296 | pci_channel_state_t state) | |
4297 | { | |
4298 | int i; | |
4299 | struct adapter *adap = pci_get_drvdata(pdev); | |
4300 | ||
4301 | if (!adap) | |
4302 | goto out; | |
4303 | ||
4304 | rtnl_lock(); | |
4305 | adap->flags &= ~FW_OK; | |
4306 | notify_ulds(adap, CXGB4_STATE_START_RECOVERY); | |
9fe6cb58 | 4307 | spin_lock(&adap->stats_lock); |
204dc3c0 DM |
4308 | for_each_port(adap, i) { |
4309 | struct net_device *dev = adap->port[i]; | |
025d0973 GP |
4310 | if (dev) { |
4311 | netif_device_detach(dev); | |
4312 | netif_carrier_off(dev); | |
4313 | } | |
204dc3c0 | 4314 | } |
9fe6cb58 | 4315 | spin_unlock(&adap->stats_lock); |
b37987e8 | 4316 | disable_interrupts(adap); |
204dc3c0 DM |
4317 | if (adap->flags & FULL_INIT_DONE) |
4318 | cxgb_down(adap); | |
4319 | rtnl_unlock(); | |
144be3d9 GS |
4320 | if ((adap->flags & DEV_ENABLED)) { |
4321 | pci_disable_device(pdev); | |
4322 | adap->flags &= ~DEV_ENABLED; | |
4323 | } | |
204dc3c0 DM |
4324 | out: return state == pci_channel_io_perm_failure ? |
4325 | PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; | |
4326 | } | |
4327 | ||
4328 | static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) | |
4329 | { | |
4330 | int i, ret; | |
4331 | struct fw_caps_config_cmd c; | |
4332 | struct adapter *adap = pci_get_drvdata(pdev); | |
4333 | ||
4334 | if (!adap) { | |
4335 | pci_restore_state(pdev); | |
4336 | pci_save_state(pdev); | |
4337 | return PCI_ERS_RESULT_RECOVERED; | |
4338 | } | |
4339 | ||
144be3d9 GS |
4340 | if (!(adap->flags & DEV_ENABLED)) { |
4341 | if (pci_enable_device(pdev)) { | |
4342 | dev_err(&pdev->dev, "Cannot reenable PCI " | |
4343 | "device after reset\n"); | |
4344 | return PCI_ERS_RESULT_DISCONNECT; | |
4345 | } | |
4346 | adap->flags |= DEV_ENABLED; | |
204dc3c0 DM |
4347 | } |
4348 | ||
4349 | pci_set_master(pdev); | |
4350 | pci_restore_state(pdev); | |
4351 | pci_save_state(pdev); | |
4352 | pci_cleanup_aer_uncorrect_error_status(pdev); | |
4353 | ||
8203b509 | 4354 | if (t4_wait_dev_ready(adap->regs) < 0) |
204dc3c0 | 4355 | return PCI_ERS_RESULT_DISCONNECT; |
b2612722 | 4356 | if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) |
204dc3c0 DM |
4357 | return PCI_ERS_RESULT_DISCONNECT; |
4358 | adap->flags |= FW_OK; | |
4359 | if (adap_init1(adap, &c)) | |
4360 | return PCI_ERS_RESULT_DISCONNECT; | |
4361 | ||
4362 | for_each_port(adap, i) { | |
4363 | struct port_info *p = adap2pinfo(adap, i); | |
4364 | ||
b2612722 | 4365 | ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, |
060e0c75 | 4366 | NULL, NULL); |
204dc3c0 DM |
4367 | if (ret < 0) |
4368 | return PCI_ERS_RESULT_DISCONNECT; | |
4369 | p->viid = ret; | |
4370 | p->xact_addr_filt = -1; | |
4371 | } | |
4372 | ||
4373 | t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, | |
4374 | adap->params.b_wnd); | |
1ae970e0 | 4375 | setup_memwin(adap); |
204dc3c0 DM |
4376 | if (cxgb_up(adap)) |
4377 | return PCI_ERS_RESULT_DISCONNECT; | |
4378 | return PCI_ERS_RESULT_RECOVERED; | |
4379 | } | |
4380 | ||
4381 | static void eeh_resume(struct pci_dev *pdev) | |
4382 | { | |
4383 | int i; | |
4384 | struct adapter *adap = pci_get_drvdata(pdev); | |
4385 | ||
4386 | if (!adap) | |
4387 | return; | |
4388 | ||
4389 | rtnl_lock(); | |
4390 | for_each_port(adap, i) { | |
4391 | struct net_device *dev = adap->port[i]; | |
025d0973 GP |
4392 | if (dev) { |
4393 | if (netif_running(dev)) { | |
4394 | link_start(dev); | |
4395 | cxgb_set_rxmode(dev); | |
4396 | } | |
4397 | netif_device_attach(dev); | |
204dc3c0 | 4398 | } |
204dc3c0 DM |
4399 | } |
4400 | rtnl_unlock(); | |
4401 | } | |
4402 | ||
3646f0e5 | 4403 | static const struct pci_error_handlers cxgb4_eeh = { |
204dc3c0 DM |
4404 | .error_detected = eeh_err_detected, |
4405 | .slot_reset = eeh_slot_reset, | |
4406 | .resume = eeh_resume, | |
4407 | }; | |
4408 | ||
9b86a8d1 HS |
4409 | /* Return true if the Link Configuration supports "High Speeds" (those greater |
4410 | * than 1Gb/s). | |
4411 | */ | |
57d8b764 | 4412 | static inline bool is_x_10g_port(const struct link_config *lc) |
b8ff05a9 | 4413 | { |
9b86a8d1 HS |
4414 | unsigned int speeds, high_speeds; |
4415 | ||
c3168cab GG |
4416 | speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); |
4417 | high_speeds = speeds & | |
4418 | ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); | |
9b86a8d1 HS |
4419 | |
4420 | return high_speeds != 0; | |
b8ff05a9 DM |
4421 | } |
4422 | ||
b8ff05a9 DM |
4423 | /* |
4424 | * Perform default configuration of DMA queues depending on the number and type | |
4425 | * of ports we found and the number of available CPUs. Most settings can be | |
4426 | * modified by the admin prior to actual use. | |
4427 | */ | |
91744948 | 4428 | static void cfg_queues(struct adapter *adap) |
b8ff05a9 DM |
4429 | { |
4430 | struct sge *s = &adap->sge; | |
ab677ff4 | 4431 | int i = 0, n10g = 0, qidx = 0; |
688848b1 AB |
4432 | #ifndef CONFIG_CHELSIO_T4_DCB |
4433 | int q10g = 0; | |
4434 | #endif | |
b8ff05a9 | 4435 | |
94cdb8bb HS |
4436 | /* Reduce memory usage in a kdump environment by disabling all offload. |
4437 | */ | |
85eacf3f | 4438 | if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { |
0fbc81b3 | 4439 | adap->params.offload = 0; |
94cdb8bb HS |
4440 | adap->params.crypto = 0; |
4441 | } | |
4442 | ||
ab677ff4 | 4443 | n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); |
688848b1 AB |
4444 | #ifdef CONFIG_CHELSIO_T4_DCB |
4445 | /* For Data Center Bridging support we need to be able to support up | |
4446 | * to 8 Traffic Priorities; each of which will be assigned to its | |
4447 | * own TX Queue in order to prevent Head-Of-Line Blocking. | |
4448 | */ | |
4449 | if (adap->params.nports * 8 > MAX_ETH_QSETS) { | |
4450 | dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", | |
4451 | MAX_ETH_QSETS, adap->params.nports * 8); | |
4452 | BUG_ON(1); | |
4453 | } | |
b8ff05a9 | 4454 | |
688848b1 AB |
4455 | for_each_port(adap, i) { |
4456 | struct port_info *pi = adap2pinfo(adap, i); | |
4457 | ||
4458 | pi->first_qset = qidx; | |
85eacf3f | 4459 | pi->nqsets = is_kdump_kernel() ? 1 : 8; |
688848b1 AB |
4460 | qidx += pi->nqsets; |
4461 | } | |
4462 | #else /* !CONFIG_CHELSIO_T4_DCB */ | |
b8ff05a9 DM |
4463 | /* |
4464 | * We default to 1 queue per non-10G port and up to # of cores queues | |
4465 | * per 10G port. | |
4466 | */ | |
4467 | if (n10g) | |
4468 | q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; | |
5952dde7 YM |
4469 | if (q10g > netif_get_num_default_rss_queues()) |
4470 | q10g = netif_get_num_default_rss_queues(); | |
b8ff05a9 | 4471 | |
85eacf3f GG |
4472 | if (is_kdump_kernel()) |
4473 | q10g = 1; | |
4474 | ||
b8ff05a9 DM |
4475 | for_each_port(adap, i) { |
4476 | struct port_info *pi = adap2pinfo(adap, i); | |
4477 | ||
4478 | pi->first_qset = qidx; | |
57d8b764 | 4479 | pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; |
b8ff05a9 DM |
4480 | qidx += pi->nqsets; |
4481 | } | |
688848b1 | 4482 | #endif /* !CONFIG_CHELSIO_T4_DCB */ |
b8ff05a9 DM |
4483 | |
4484 | s->ethqsets = qidx; | |
4485 | s->max_ethqsets = qidx; /* MSI-X may lower it later */ | |
4486 | ||
0fbc81b3 | 4487 | if (is_uld(adap)) { |
b8ff05a9 DM |
4488 | /* |
4489 | * For offload we use 1 queue/channel if all ports are up to 1G, | |
4490 | * otherwise we divide all available queues amongst the channels | |
4491 | * capped by the number of available cores. | |
4492 | */ | |
4493 | if (n10g) { | |
a56177e1 | 4494 | i = min_t(int, MAX_OFLD_QSETS, num_online_cpus()); |
0fbc81b3 HS |
4495 | s->ofldqsets = roundup(i, adap->params.nports); |
4496 | } else { | |
4497 | s->ofldqsets = adap->params.nports; | |
4498 | } | |
b8ff05a9 DM |
4499 | } |
4500 | ||
4501 | for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { | |
4502 | struct sge_eth_rxq *r = &s->ethrxq[i]; | |
4503 | ||
c887ad0e | 4504 | init_rspq(adap, &r->rspq, 5, 10, 1024, 64); |
b8ff05a9 DM |
4505 | r->fl.size = 72; |
4506 | } | |
4507 | ||
4508 | for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) | |
4509 | s->ethtxq[i].q.size = 1024; | |
4510 | ||
4511 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) | |
4512 | s->ctrlq[i].q.size = 512; | |
4513 | ||
a4569504 AG |
4514 | if (!is_t4(adap->params.chip)) |
4515 | s->ptptxq.q.size = 8; | |
4516 | ||
c887ad0e | 4517 | init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); |
0fbc81b3 | 4518 | init_rspq(adap, &s->intrq, 0, 1, 512, 64); |
b8ff05a9 DM |
4519 | } |
4520 | ||
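A standalone sketch of the non-DCB queue-set split performed by cfg_queues() above, assuming a hypothetical 2-port adapter whose ports both link above 1Gb/s; the MAX_ETH_QSETS value of 32 and the RSS default of 8 are assumptions used only for this illustration.

#include <stdio.h>

int main(void)
{
	int max_eth_qsets = 32;		/* assumed value of MAX_ETH_QSETS */
	int rss_default = 8;		/* assumed netif_get_num_default_rss_queues() */
	int nports = 2, n10g = 2;	/* both ports count as "10G or faster" */
	int q10g, qidx = 0, i;

	/* One queue set per sub-1G port, the remainder shared by fast ports. */
	q10g = (max_eth_qsets - (nports - n10g)) / n10g;	/* (32 - 0) / 2 = 16 */
	if (q10g > rss_default)
		q10g = rss_default;				/* capped to 8 */

	for (i = 0; i < nports; i++) {
		printf("port %d: first_qset=%d nqsets=%d\n", i, qidx, q10g);
		qidx += q10g;
	}
	printf("ethqsets=%d\n", qidx);				/* 16 in total */
	return 0;
}

With these numbers q10g starts at 16, is capped to 8 by the RSS default, and each port ends up with 8 queue sets.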
4521 | /* | |
4522 | * Reduce the number of Ethernet queues across all ports to at most n. | |
4523 | * n provides at least one queue per port. | |
4524 | */ | |
91744948 | 4525 | static void reduce_ethqs(struct adapter *adap, int n) |
b8ff05a9 DM |
4526 | { |
4527 | int i; | |
4528 | struct port_info *pi; | |
4529 | ||
4530 | while (n < adap->sge.ethqsets) | |
4531 | for_each_port(adap, i) { | |
4532 | pi = adap2pinfo(adap, i); | |
4533 | if (pi->nqsets > 1) { | |
4534 | pi->nqsets--; | |
4535 | adap->sge.ethqsets--; | |
4536 | if (adap->sge.ethqsets <= n) | |
4537 | break; | |
4538 | } | |
4539 | } | |
4540 | ||
4541 | n = 0; | |
4542 | for_each_port(adap, i) { | |
4543 | pi = adap2pinfo(adap, i); | |
4544 | pi->first_qset = n; | |
4545 | n += pi->nqsets; | |
4546 | } | |
4547 | } | |
4548 | ||
94cdb8bb HS |
4549 | static int get_msix_info(struct adapter *adap) |
4550 | { | |
4551 | struct uld_msix_info *msix_info; | |
0fbc81b3 HS |
4552 | unsigned int max_ingq = 0; |
4553 | ||
4554 | if (is_offload(adap)) | |
4555 | max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld; | |
4556 | if (is_pci_uld(adap)) | |
4557 | max_ingq += MAX_OFLD_QSETS * adap->num_uld; | |
4558 | ||
4559 | if (!max_ingq) | |
4560 | goto out; | |
94cdb8bb HS |
4561 | |
4562 | msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL); | |
4563 | if (!msix_info) | |
4564 | return -ENOMEM; | |
4565 | ||
4566 | adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), | |
4567 | sizeof(long), GFP_KERNEL); | |
4568 | if (!adap->msix_bmap_ulds.msix_bmap) { | |
4569 | kfree(msix_info); | |
4570 | return -ENOMEM; | |
4571 | } | |
4572 | spin_lock_init(&adap->msix_bmap_ulds.lock); | |
4573 | adap->msix_info_ulds = msix_info; | |
0fbc81b3 | 4574 | out: |
94cdb8bb HS |
4575 | return 0; |
4576 | } | |
4577 | ||
4578 | static void free_msix_info(struct adapter *adap) | |
4579 | { | |
0fbc81b3 | 4580 | if (!(adap->num_uld && adap->num_ofld_uld)) |
94cdb8bb HS |
4581 | return; |
4582 | ||
4583 | kfree(adap->msix_info_ulds); | |
4584 | kfree(adap->msix_bmap_ulds.msix_bmap); | |
4585 | } | |
4586 | ||
b8ff05a9 DM |
4587 | /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ |
4588 | #define EXTRA_VECS 2 | |
4589 | ||
91744948 | 4590 | static int enable_msix(struct adapter *adap) |
b8ff05a9 | 4591 | { |
94cdb8bb HS |
4592 | int ofld_need = 0, uld_need = 0; |
4593 | int i, j, want, need, allocated; | |
b8ff05a9 DM |
4594 | struct sge *s = &adap->sge; |
4595 | unsigned int nchan = adap->params.nports; | |
f36e58e5 | 4596 | struct msix_entry *entries; |
94cdb8bb | 4597 | int max_ingq = MAX_INGQ; |
f36e58e5 | 4598 | |
0fbc81b3 HS |
4599 | if (is_pci_uld(adap)) |
4600 | max_ingq += (MAX_OFLD_QSETS * adap->num_uld); | |
4601 | if (is_offload(adap)) | |
4602 | max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld); | |
94cdb8bb | 4603 | entries = kmalloc(sizeof(*entries) * (max_ingq + 1), |
f36e58e5 HS |
4604 | GFP_KERNEL); |
4605 | if (!entries) | |
4606 | return -ENOMEM; | |
b8ff05a9 | 4607 | |
94cdb8bb | 4608 | /* map for msix */ |
0fbc81b3 HS |
4609 | if (get_msix_info(adap)) { |
4610 | adap->params.offload = 0; | |
94cdb8bb | 4611 | adap->params.crypto = 0; |
0fbc81b3 | 4612 | } |
94cdb8bb HS |
4613 | |
4614 | for (i = 0; i < max_ingq + 1; ++i) | |
b8ff05a9 DM |
4615 | entries[i].entry = i; |
4616 | ||
4617 | want = s->max_ethqsets + EXTRA_VECS; | |
4618 | if (is_offload(adap)) { | |
0fbc81b3 HS |
4619 | want += adap->num_ofld_uld * s->ofldqsets; |
4620 | ofld_need = adap->num_ofld_uld * nchan; | |
b8ff05a9 | 4621 | } |
94cdb8bb | 4622 | if (is_pci_uld(adap)) { |
0fbc81b3 HS |
4623 | want += adap->num_uld * s->ofldqsets; |
4624 | uld_need = adap->num_uld * nchan; | |
94cdb8bb | 4625 | } |
688848b1 AB |
4626 | #ifdef CONFIG_CHELSIO_T4_DCB |
4627 | /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for | |
4628 | * each port. | |
4629 | */ | |
94cdb8bb | 4630 | need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; |
688848b1 | 4631 | #else |
94cdb8bb | 4632 | need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; |
688848b1 | 4633 | #endif |
f36e58e5 HS |
4634 | allocated = pci_enable_msix_range(adap->pdev, entries, need, want); |
4635 | if (allocated < 0) { | |
4636 | dev_info(adap->pdev_dev, "not enough MSI-X vectors left," | |
4637 | " not using MSI-X\n"); | |
4638 | kfree(entries); | |
4639 | return allocated; | |
4640 | } | |
b8ff05a9 | 4641 | |
f36e58e5 | 4642 | /* Distribute available vectors to the various queue groups. |
c32ad224 AG |
4643 | * Every group gets its minimum requirement and NIC gets top |
4644 | * priority for leftovers. | |
4645 | */ | |
94cdb8bb | 4646 | i = allocated - EXTRA_VECS - ofld_need - uld_need; |
c32ad224 AG |
4647 | if (i < s->max_ethqsets) { |
4648 | s->max_ethqsets = i; | |
4649 | if (i < s->ethqsets) | |
4650 | reduce_ethqs(adap, i); | |
4651 | } | |
0fbc81b3 | 4652 | if (is_uld(adap)) { |
94cdb8bb HS |
4653 | if (allocated < want) |
4654 | s->nqs_per_uld = nchan; | |
4655 | else | |
0fbc81b3 | 4656 | s->nqs_per_uld = s->ofldqsets; |
94cdb8bb HS |
4657 | } |
4658 | ||
0fbc81b3 | 4659 | for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i) |
c32ad224 | 4660 | adap->msix_info[i].vec = entries[i].vector; |
0fbc81b3 HS |
4661 | if (is_uld(adap)) { |
4662 | for (j = 0 ; i < allocated; ++i, j++) { | |
94cdb8bb | 4663 | adap->msix_info_ulds[j].vec = entries[i].vector; |
0fbc81b3 HS |
4664 | adap->msix_info_ulds[j].idx = i; |
4665 | } | |
94cdb8bb HS |
4666 | adap->msix_bmap_ulds.mapsize = j; |
4667 | } | |
43eb4e82 | 4668 | dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " |
0fbc81b3 HS |
4669 | "nic %d per uld %d\n", |
4670 | allocated, s->max_ethqsets, s->nqs_per_uld); | |
c32ad224 | 4671 | |
f36e58e5 | 4672 | kfree(entries); |
c32ad224 | 4673 | return 0; |
b8ff05a9 DM |
4674 | } |
4675 | ||
4676 | #undef EXTRA_VECS | |
4677 | ||
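The MSI-X sizing in enable_msix() amounts to asking for one vector per Ethernet queue set plus EXTRA_VECS, while tolerating as little as one vector per port plus EXTRA_VECS in the non-DCB, no-ULD case. A minimal sketch of that arithmetic, assuming a hypothetical 2-port NIC-only adapter with 16 Ethernet queue sets:

#include <stdio.h>

#define EXTRA_VECS 2	/* firmware event queue + non-data interrupts */

int main(void)
{
	/* Hypothetical 2-port NIC-only adapter: no offload or crypto ULDs. */
	int nports = 2, max_ethqsets = 16;
	int ofld_need = 0, uld_need = 0;

	int want = max_ethqsets + EXTRA_VECS;			/* 18 */
	int need = nports + EXTRA_VECS + ofld_need + uld_need;	/* 4 */

	/* Mirrors pci_enable_msix_range(adap->pdev, entries, need, want). */
	printf("request between %d and %d MSI-X vectors\n", need, want);
	return 0;
}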
91744948 | 4678 | static int init_rss(struct adapter *adap) |
671b0060 | 4679 | { |
c035e183 HS |
4680 | unsigned int i; |
4681 | int err; | |
4682 | ||
4683 | err = t4_init_rss_mode(adap, adap->mbox); | |
4684 | if (err) | |
4685 | return err; | |
671b0060 DM |
4686 | |
4687 | for_each_port(adap, i) { | |
4688 | struct port_info *pi = adap2pinfo(adap, i); | |
4689 | ||
4690 | pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); | |
4691 | if (!pi->rss) | |
4692 | return -ENOMEM; | |
671b0060 DM |
4693 | } |
4694 | return 0; | |
4695 | } | |
4696 | ||
547fd272 HS |
4697 | static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap, |
4698 | enum pci_bus_speed *speed, | |
4699 | enum pcie_link_width *width) | |
4700 | { | |
4701 | u32 lnkcap1, lnkcap2; | |
4702 | int err1, err2; | |
4703 | ||
4704 | #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ | |
4705 | ||
4706 | *speed = PCI_SPEED_UNKNOWN; | |
4707 | *width = PCIE_LNK_WIDTH_UNKNOWN; | |
4708 | ||
4709 | err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP, | |
4710 | &lnkcap1); | |
4711 | err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2, | |
4712 | &lnkcap2); | |
4713 | if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ | |
4714 | if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) | |
4715 | *speed = PCIE_SPEED_8_0GT; | |
4716 | else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) | |
4717 | *speed = PCIE_SPEED_5_0GT; | |
4718 | else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) | |
4719 | *speed = PCIE_SPEED_2_5GT; | |
4720 | } | |
4721 | if (!err1) { | |
4722 | *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; | |
4723 | if (!lnkcap2) { /* pre-r3.0 */ | |
4724 | if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) | |
4725 | *speed = PCIE_SPEED_5_0GT; | |
4726 | else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) | |
4727 | *speed = PCIE_SPEED_2_5GT; | |
4728 | } | |
4729 | } | |
4730 | ||
4731 | if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) | |
4732 | return err1 ? err1 : err2 ? err2 : -EINVAL; | |
4733 | return 0; | |
4734 | } | |
4735 | ||
4736 | static void cxgb4_check_pcie_caps(struct adapter *adap) | |
4737 | { | |
4738 | enum pcie_link_width width, width_cap; | |
4739 | enum pci_bus_speed speed, speed_cap; | |
4740 | ||
4741 | #define PCIE_SPEED_STR(speed) \ | |
4742 | (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ | |
4743 | speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ | |
4744 | speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \ | |
4745 | "Unknown") | |
4746 | ||
4747 | if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) { | |
4748 | dev_warn(adap->pdev_dev, | |
4749 | "Unable to determine PCIe device BW capabilities\n"); | |
4750 | return; | |
4751 | } | |
4752 | ||
4753 | if (pcie_get_minimum_link(adap->pdev, &speed, &width) || | |
4754 | speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { | |
4755 | dev_warn(adap->pdev_dev, | |
4756 | "Unable to determine PCI Express bandwidth.\n"); | |
4757 | return; | |
4758 | } | |
4759 | ||
4760 | dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n", | |
4761 | PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); | |
4762 | dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n", | |
4763 | width, width_cap); | |
4764 | if (speed < speed_cap || width < width_cap) | |
4765 | dev_info(adap->pdev_dev, | |
4766 | "A slot with more lanes and/or higher speed is " | |
4767 | "suggested for optimal performance.\n"); | |
4768 | } | |
4769 | ||
0de72738 HS |
4770 | /* Dump basic information about the adapter */ |
4771 | static void print_adapter_info(struct adapter *adapter) | |
4772 | { | |
760446f9 GG |
4773 | /* Hardware/Firmware/etc. Version/Revision IDs */ |
4774 | t4_dump_version_info(adapter); | |
0de72738 HS |
4775 | |
4776 | /* Software/Hardware configuration */ | |
4777 | dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", | |
4778 | is_offload(adapter) ? "R" : "", | |
4779 | ((adapter->flags & USING_MSIX) ? "MSI-X" : | |
4780 | (adapter->flags & USING_MSI) ? "MSI" : ""), | |
4781 | is_offload(adapter) ? "Offload" : "non-Offload"); | |
4782 | } | |
4783 | ||
91744948 | 4784 | static void print_port_info(const struct net_device *dev) |
b8ff05a9 | 4785 | { |
b8ff05a9 | 4786 | char buf[80]; |
118969ed | 4787 | char *bufp = buf; |
f1a051b9 | 4788 | const char *spd = ""; |
118969ed DM |
4789 | const struct port_info *pi = netdev_priv(dev); |
4790 | const struct adapter *adap = pi->adapter; | |
f1a051b9 DM |
4791 | |
4792 | if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) | |
4793 | spd = " 2.5 GT/s"; | |
4794 | else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) | |
4795 | spd = " 5 GT/s"; | |
d2e752db RD |
4796 | else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) |
4797 | spd = " 8 GT/s"; | |
b8ff05a9 | 4798 | |
c3168cab | 4799 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) |
5e78f7fd | 4800 | bufp += sprintf(bufp, "100M/"); |
c3168cab | 4801 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) |
5e78f7fd | 4802 | bufp += sprintf(bufp, "1G/"); |
c3168cab | 4803 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) |
118969ed | 4804 | bufp += sprintf(bufp, "10G/"); |
c3168cab | 4805 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) |
9b86a8d1 | 4806 | bufp += sprintf(bufp, "25G/"); |
c3168cab | 4807 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) |
72aca4bf | 4808 | bufp += sprintf(bufp, "40G/"); |
c3168cab GG |
4809 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) |
4810 | bufp += sprintf(bufp, "50G/"); | |
4811 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) | |
9b86a8d1 | 4812 | bufp += sprintf(bufp, "100G/"); |
c3168cab GG |
4813 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G) |
4814 | bufp += sprintf(bufp, "200G/"); | |
4815 | if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G) | |
4816 | bufp += sprintf(bufp, "400G/"); | |
118969ed DM |
4817 | if (bufp != buf) |
4818 | --bufp; | |
72aca4bf | 4819 | sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); |
118969ed | 4820 | |
0de72738 HS |
4821 | netdev_info(dev, "%s: Chelsio %s (%s) %s\n", |
4822 | dev->name, adap->params.vpd.id, adap->name, buf); | |
b8ff05a9 DM |
4823 | } |
4824 | ||
06546391 DM |
4825 | /* |
4826 | * Free the following resources: | |
4827 | * - memory used for tables | |
4828 | * - MSI/MSI-X | |
4829 | * - net devices | |
4830 | * - resources FW is holding for us | |
4831 | */ | |
4832 | static void free_some_resources(struct adapter *adapter) | |
4833 | { | |
4834 | unsigned int i; | |
4835 | ||
3bdb376e | 4836 | kvfree(adapter->smt); |
752ade68 | 4837 | kvfree(adapter->l2t); |
b72a32da | 4838 | t4_cleanup_sched(adapter); |
752ade68 | 4839 | kvfree(adapter->tids.tid_tab); |
e0f911c8 | 4840 | cxgb4_cleanup_tc_flower(adapter); |
d8931847 | 4841 | cxgb4_cleanup_tc_u32(adapter); |
4b8e27a8 HS |
4842 | kfree(adapter->sge.egr_map); |
4843 | kfree(adapter->sge.ingr_map); | |
4844 | kfree(adapter->sge.starving_fl); | |
4845 | kfree(adapter->sge.txq_maperr); | |
5b377d11 HS |
4846 | #ifdef CONFIG_DEBUG_FS |
4847 | kfree(adapter->sge.blocked_fl); | |
4848 | #endif | |
06546391 DM |
4849 | disable_msi(adapter); |
4850 | ||
4851 | for_each_port(adapter, i) | |
671b0060 | 4852 | if (adapter->port[i]) { |
4f3a0fcf HS |
4853 | struct port_info *pi = adap2pinfo(adapter, i); |
4854 | ||
4855 | if (pi->viid != 0) | |
4856 | t4_free_vi(adapter, adapter->mbox, adapter->pf, | |
4857 | 0, pi->viid); | |
671b0060 | 4858 | kfree(adap2pinfo(adapter, i)->rss); |
06546391 | 4859 | free_netdev(adapter->port[i]); |
671b0060 | 4860 | } |
06546391 | 4861 | if (adapter->flags & FW_OK) |
b2612722 | 4862 | t4_fw_bye(adapter, adapter->pf); |
06546391 DM |
4863 | } |
4864 | ||
2ed28baa | 4865 | #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) |
35d35682 | 4866 | #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ |
b8ff05a9 | 4867 | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) |
22adfe0a | 4868 | #define SEGMENT_SIZE 128 |
b8ff05a9 | 4869 | |
d86bd29e HS |
4870 | static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) |
4871 | { | |
d86bd29e HS |
4872 | u16 device_id; |
4873 | ||
4874 | /* Retrieve adapter's device ID */ | |
4875 | pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); | |
46cdc9be | 4876 | |
4877 | switch (device_id >> 12) { | |
d86bd29e | 4878 | case CHELSIO_T4: |
46cdc9be | 4879 | return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); |
d86bd29e | 4880 | case CHELSIO_T5: |
46cdc9be | 4881 | return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); |
d86bd29e | 4882 | case CHELSIO_T6: |
46cdc9be | 4883 | return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); |
d86bd29e HS |
4884 | default: |
4885 | dev_err(&pdev->dev, "Device %d is not supported\n", | |
4886 | device_id); | |
d86bd29e | 4887 | } |
46cdc9be | 4888 | return -EINVAL; |
d86bd29e HS |
4889 | } |
4890 | ||
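get_chip_type() relies on T4, T5 and T6 parts carrying PCI device IDs of the form 0x4xxx, 0x5xxx and 0x6xxx respectively, so the top nibble names the chip generation. A small standalone sketch of that decode; the CHELSIO_* values and the sample device ID below are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Assumed values mirroring the CHELSIO_T4/T5/T6 chip codes. */
enum { CHELSIO_T4 = 0x4, CHELSIO_T5 = 0x5, CHELSIO_T6 = 0x6 };

int main(void)
{
	uint16_t device_id = 0x5401;	/* hypothetical T5 PCI device ID */

	switch (device_id >> 12) {	/* top nibble selects the generation */
	case CHELSIO_T4:
		printf("T4 family device 0x%04x\n", device_id);
		break;
	case CHELSIO_T5:
		printf("T5 family device 0x%04x\n", device_id);
		break;
	case CHELSIO_T6:
		printf("T6 family device 0x%04x\n", device_id);
		break;
	default:
		printf("unsupported device 0x%04x\n", device_id);
	}
	return 0;
}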
b6244201 | 4891 | #ifdef CONFIG_PCI_IOV |
e7b48a32 HS |
4892 | static void dummy_setup(struct net_device *dev) |
4893 | { | |
4894 | dev->type = ARPHRD_NONE; | |
4895 | dev->mtu = 0; | |
4896 | dev->hard_header_len = 0; | |
4897 | dev->addr_len = 0; | |
4898 | dev->tx_queue_len = 0; | |
4899 | dev->flags |= IFF_NOARP; | |
4900 | dev->priv_flags |= IFF_NO_QUEUE; | |
4901 | ||
4902 | /* Initialize the device structure. */ | |
4903 | dev->netdev_ops = &cxgb4_mgmt_netdev_ops; | |
4904 | dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; | |
cf124db5 | 4905 | dev->needs_free_netdev = true; |
e7b48a32 HS |
4906 | } |
4907 | ||
4908 | static int config_mgmt_dev(struct pci_dev *pdev) | |
4909 | { | |
4910 | struct adapter *adap = pci_get_drvdata(pdev); | |
4911 | struct net_device *netdev; | |
4912 | struct port_info *pi; | |
4913 | char name[IFNAMSIZ]; | |
4914 | int err; | |
4915 | ||
4916 | snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); | |
038c35a8 GG |
4917 | netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN, |
4918 | dummy_setup); | |
e7b48a32 HS |
4919 | if (!netdev) |
4920 | return -ENOMEM; | |
4921 | ||
4922 | pi = netdev_priv(netdev); | |
4923 | pi->adapter = adap; | |
c3168cab | 4924 | pi->tx_chan = adap->pf % adap->params.nports; |
e7b48a32 HS |
4925 | SET_NETDEV_DEV(netdev, &pdev->dev); |
4926 | ||
4927 | adap->port[0] = netdev; | |
c3168cab | 4928 | pi->port_id = 0; |
e7b48a32 HS |
4929 | |
4930 | err = register_netdev(adap->port[0]); | |
4931 | if (err) { | |
4932 | pr_info("Unable to register VF mgmt netdev %s\n", name); | |
4933 | free_netdev(adap->port[0]); | |
4934 | adap->port[0] = NULL; | |
4935 | return err; | |
4936 | } | |
4937 | return 0; | |
4938 | } | |
4939 | ||
b6244201 HS |
4940 | static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) |
4941 | { | |
7829451c | 4942 | struct adapter *adap = pci_get_drvdata(pdev); |
b6244201 HS |
4943 | int err = 0; |
4944 | int current_vfs = pci_num_vf(pdev); | |
4945 | u32 pcie_fw; | |
b6244201 | 4946 | |
7829451c | 4947 | pcie_fw = readl(adap->regs + PCIE_FW_A); |
b6244201 HS |
4948 | /* Check if cxgb4 is the MASTER and fw is initialized */ |
4949 | if (!(pcie_fw & PCIE_FW_INIT_F) || | |
4950 | !(pcie_fw & PCIE_FW_MASTER_VLD_F) || | |
4951 | PCIE_FW_MASTER_G(pcie_fw) != 4) { | |
4952 | dev_warn(&pdev->dev, | |
4953 | "cxgb4 driver needs to be MASTER to support SRIOV\n"); | |
4954 | return -EOPNOTSUPP; | |
4955 | } | |
4956 | ||
4957 | /* If any of the VFs is already assigned to a guest OS, then | |
4958 | * SRIOV for the adapter cannot be modified | |
4959 | */ | |
4960 | if (current_vfs && pci_vfs_assigned(pdev)) { | |
4961 | dev_err(&pdev->dev, | |
4962 | "Cannot modify SR-IOV while VFs are assigned\n"); | |
4963 | num_vfs = current_vfs; | |
4964 | return num_vfs; | |
4965 | } | |
4966 | ||
4967 | /* Disable SRIOV when zero is passed. | |
4968 | * One needs to disable SRIOV before modifying it, else | |
4969 | * stack throws the below warning: | |
4970 | * " 'n' VFs already enabled. Disable before enabling 'm' VFs." | |
4971 | */ | |
4972 | if (!num_vfs) { | |
4973 | pci_disable_sriov(pdev); | |
e7b48a32 | 4974 | if (adap->port[0]) { |
7829451c | 4975 | unregister_netdev(adap->port[0]); |
e7b48a32 HS |
4976 | adap->port[0] = NULL; |
4977 | } | |
661dbeb9 HS |
4978 | /* free VF resources */ |
4979 | kfree(adap->vfinfo); | |
4980 | adap->vfinfo = NULL; | |
4981 | adap->num_vfs = 0; | |
b6244201 HS |
4982 | return num_vfs; |
4983 | } | |
4984 | ||
4985 | if (num_vfs != current_vfs) { | |
4986 | err = pci_enable_sriov(pdev, num_vfs); | |
4987 | if (err) | |
4988 | return err; | |
7829451c | 4989 | |
661dbeb9 | 4990 | adap->num_vfs = num_vfs; |
e7b48a32 HS |
4991 | err = config_mgmt_dev(pdev); |
4992 | if (err) | |
4993 | return err; | |
b6244201 | 4994 | } |
661dbeb9 HS |
4995 | |
4996 | adap->vfinfo = kcalloc(adap->num_vfs, | |
4997 | sizeof(struct vf_info), GFP_KERNEL); | |
4998 | if (adap->vfinfo) | |
4999 | fill_vf_station_mac_addr(adap); | |
b6244201 HS |
5000 | return num_vfs; |
5001 | } | |
5002 | #endif | |
5003 | ||
1dd06ae8 | 5004 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
b8ff05a9 | 5005 | { |
22adfe0a | 5006 | int func, i, err, s_qpp, qpp, num_seg; |
b8ff05a9 | 5007 | struct port_info *pi; |
c8f44aff | 5008 | bool highdma = false; |
b8ff05a9 | 5009 | struct adapter *adapter = NULL; |
7829451c | 5010 | struct net_device *netdev; |
d6ce2628 | 5011 | void __iomem *regs; |
d86bd29e HS |
5012 | u32 whoami, pl_rev; |
5013 | enum chip_type chip; | |
7829451c | 5014 | static int adap_idx = 1; |
0a327889 | 5015 | #ifdef CONFIG_PCI_IOV |
96fe11f2 | 5016 | u32 v, port_vec; |
0a327889 | 5017 | #endif |
b8ff05a9 DM |
5018 | |
5019 | printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); | |
5020 | ||
5021 | err = pci_request_regions(pdev, KBUILD_MODNAME); | |
5022 | if (err) { | |
5023 | /* Just info, some other driver may have claimed the device. */ | |
5024 | dev_info(&pdev->dev, "cannot obtain PCI resources\n"); | |
5025 | return err; | |
5026 | } | |
5027 | ||
b8ff05a9 DM |
5028 | err = pci_enable_device(pdev); |
5029 | if (err) { | |
5030 | dev_err(&pdev->dev, "cannot enable PCI device\n"); | |
5031 | goto out_release_regions; | |
5032 | } | |
5033 | ||
d6ce2628 HS |
5034 | regs = pci_ioremap_bar(pdev, 0); |
5035 | if (!regs) { | |
5036 | dev_err(&pdev->dev, "cannot map device registers\n"); | |
5037 | err = -ENOMEM; | |
5038 | goto out_disable_device; | |
5039 | } | |
5040 | ||
8203b509 HS |
5041 | err = t4_wait_dev_ready(regs); |
5042 | if (err < 0) | |
5043 | goto out_unmap_bar0; | |
5044 | ||
d6ce2628 | 5045 | /* We control everything through one PF */ |
d86bd29e HS |
5046 | whoami = readl(regs + PL_WHOAMI_A); |
5047 | pl_rev = REV_G(readl(regs + PL_REV_A)); | |
5048 | chip = get_chip_type(pdev, pl_rev); | |
5049 | func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? | |
5050 | SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); | |
d6ce2628 | 5051 | if (func != ent->driver_data) { |
7829451c | 5052 | #ifndef CONFIG_PCI_IOV |
d6ce2628 | 5053 | iounmap(regs); |
7829451c | 5054 | #endif |
d6ce2628 HS |
5055 | pci_disable_device(pdev); |
5056 | pci_save_state(pdev); /* to restore SR-IOV later */ | |
5057 | goto sriov; | |
5058 | } | |
5059 | ||
b8ff05a9 | 5060 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
c8f44aff | 5061 | highdma = true; |
b8ff05a9 DM |
5062 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
5063 | if (err) { | |
5064 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " | |
5065 | "coherent allocations\n"); | |
d6ce2628 | 5066 | goto out_unmap_bar0; |
b8ff05a9 DM |
5067 | } |
5068 | } else { | |
5069 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
5070 | if (err) { | |
5071 | dev_err(&pdev->dev, "no usable DMA configuration\n"); | |
d6ce2628 | 5072 | goto out_unmap_bar0; |
b8ff05a9 DM |
5073 | } |
5074 | } | |
5075 | ||
5076 | pci_enable_pcie_error_reporting(pdev); | |
5077 | pci_set_master(pdev); | |
5078 | pci_save_state(pdev); | |
5079 | ||
5080 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | |
5081 | if (!adapter) { | |
5082 | err = -ENOMEM; | |
d6ce2628 | 5083 | goto out_unmap_bar0; |
b8ff05a9 | 5084 | } |
7829451c | 5085 | adap_idx++; |
b8ff05a9 | 5086 | |
29aaee65 AB |
5087 | adapter->workq = create_singlethread_workqueue("cxgb4"); |
5088 | if (!adapter->workq) { | |
5089 | err = -ENOMEM; | |
5090 | goto out_free_adapter; | |
5091 | } | |
5092 | ||
7f080c3f HS |
5093 | adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + |
5094 | (sizeof(struct mbox_cmd) * | |
5095 | T4_OS_LOG_MBOX_CMDS), | |
5096 | GFP_KERNEL); | |
5097 | if (!adapter->mbox_log) { | |
5098 | err = -ENOMEM; | |
5099 | goto out_free_adapter; | |
5100 | } | |
5101 | adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; | |
5102 | ||
144be3d9 GS |
5103 | /* PCI device has been enabled */ |
5104 | adapter->flags |= DEV_ENABLED; | |
5105 | ||
d6ce2628 | 5106 | adapter->regs = regs; |
b8ff05a9 DM |
5107 | adapter->pdev = pdev; |
5108 | adapter->pdev_dev = &pdev->dev; | |
0de72738 | 5109 | adapter->name = pci_name(pdev); |
3069ee9b | 5110 | adapter->mbox = func; |
b2612722 | 5111 | adapter->pf = func; |
ea1e76f7 | 5112 | adapter->msg_enable = DFLT_MSG_ENABLE; |
b8ff05a9 DM |
5113 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); |
5114 | ||
b0ba9d5f CL |
5115 | /* If possible, we use PCIe Relaxed Ordering Attribute to deliver |
5116 | * Ingress Packet Data to Free List Buffers in order to allow for | |
5117 | * chipset performance optimizations between the Root Complex and | |
5118 | * Memory Controllers. (Messages to the associated Ingress Queue | |
5119 | * notifying new Packet Placement in the Free Lists Buffers will be | |
5120 | * sent without the Relaxed Ordering Attribute thus guaranteeing that | |
5121 | * all preceding PCIe Transaction Layer Packets will be processed | |
5122 | * first.) But some Root Complexes have various issues with Upstream | |
5123 | * Transaction Layer Packets with the Relaxed Ordering Attribute set. | |
5124 | * Devices below such Root Complexes have the Relaxed Ordering bit | |
5125 | * cleared in their configuration space, so we check our | |
5126 | * PCIe configuration space to see if it's flagged with advice against | |
5127 | * using Relaxed Ordering. | |
5128 | */ | |
5129 | if (!pcie_relaxed_ordering_enabled(pdev)) | |
5130 | adapter->flags |= ROOT_NO_RELAXED_ORDERING; | |
5131 | ||
b8ff05a9 DM |
5132 | spin_lock_init(&adapter->stats_lock); |
5133 | spin_lock_init(&adapter->tid_release_lock); | |
e327c225 | 5134 | spin_lock_init(&adapter->win0_lock); |
4055ae5e HS |
5135 | spin_lock_init(&adapter->mbox_lock); |
5136 | ||
5137 | INIT_LIST_HEAD(&adapter->mlist.list); | |
b8ff05a9 DM |
5138 | |
5139 | INIT_WORK(&adapter->tid_release_task, process_tid_release_list); | |
881806bc VP |
5140 | INIT_WORK(&adapter->db_full_task, process_db_full); |
5141 | INIT_WORK(&adapter->db_drop_task, process_db_drop); | |
b8ff05a9 DM |
5142 | |
5143 | err = t4_prep_adapter(adapter); | |
5144 | if (err) | |
d6ce2628 HS |
5145 | goto out_free_adapter; |
5146 | ||
22adfe0a | 5147 | |
d14807dd | 5148 | if (!is_t4(adapter->params.chip)) { |
f612b815 HS |
5149 | s_qpp = (QUEUESPERPAGEPF0_S + |
5150 | (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * | |
b2612722 | 5151 | adapter->pf); |
f612b815 HS |
5152 | qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter, |
5153 | SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp); | |
22adfe0a SR |
5154 | num_seg = PAGE_SIZE / SEGMENT_SIZE; |
5155 | ||
5156 | /* Each segment size is 128B. Write coalescing is enabled only | |
5157 | * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the | |
5158 | * queue is less than the number of segments that can be | |
5159 | * accommodated in a page. | |
5160 | */ | |
5161 | if (qpp > num_seg) { | |
5162 | dev_err(&pdev->dev, | |
5163 | "Incorrect number of egress queues per page\n"); | |
5164 | err = -EINVAL; | |
d6ce2628 | 5165 | goto out_free_adapter; |
22adfe0a SR |
5166 | } |
5167 | adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), | |
5168 | pci_resource_len(pdev, 2)); | |
5169 | if (!adapter->bar2) { | |
5170 | dev_err(&pdev->dev, "cannot map device bar2 region\n"); | |
5171 | err = -ENOMEM; | |
d6ce2628 | 5172 | goto out_free_adapter; |
22adfe0a SR |
5173 | } |
5174 | } | |
5175 | ||
636f9d37 | 5176 | setup_memwin(adapter); |
b8ff05a9 | 5177 | err = adap_init0(adapter); |
5b377d11 HS |
5178 | #ifdef CONFIG_DEBUG_FS |
5179 | bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); | |
5180 | #endif | |
636f9d37 | 5181 | setup_memwin_rdma(adapter); |
b8ff05a9 DM |
5182 | if (err) |
5183 | goto out_unmap_bar; | |
5184 | ||
2a485cf7 HS |
5185 | /* configure SGE_STAT_CFG_A to read WC stats */ |
5186 | if (!is_t4(adapter->params.chip)) | |
676d6a75 HS |
5187 | t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) | |
5188 | (is_t5(adapter->params.chip) ? STATMODE_V(0) : | |
5189 | T6_STATMODE_V(0))); | |
2a485cf7 | 5190 | |
b8ff05a9 | 5191 | for_each_port(adapter, i) { |
b8ff05a9 DM |
5192 | netdev = alloc_etherdev_mq(sizeof(struct port_info), |
5193 | MAX_ETH_QSETS); | |
5194 | if (!netdev) { | |
5195 | err = -ENOMEM; | |
5196 | goto out_free_dev; | |
5197 | } | |
5198 | ||
5199 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
5200 | ||
5201 | adapter->port[i] = netdev; | |
5202 | pi = netdev_priv(netdev); | |
5203 | pi->adapter = adapter; | |
5204 | pi->xact_addr_filt = -1; | |
b8ff05a9 | 5205 | pi->port_id = i; |
b8ff05a9 DM |
5206 | netdev->irq = pdev->irq; |
5207 | ||
2ed28baa MM |
5208 | netdev->hw_features = NETIF_F_SG | TSO_FLAGS | |
5209 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
5210 | NETIF_F_RXCSUM | NETIF_F_RXHASH | | |
d8931847 RL |
5211 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
5212 | NETIF_F_HW_TC; | |
c8f44aff MM |
5213 | if (highdma) |
5214 | netdev->hw_features |= NETIF_F_HIGHDMA; | |
5215 | netdev->features |= netdev->hw_features; | |
b8ff05a9 DM |
5216 | netdev->vlan_features = netdev->features & VLAN_FEAT; |
5217 | ||
01789349 JP |
5218 | netdev->priv_flags |= IFF_UNICAST_FLT; |
5219 | ||
d894be57 | 5220 | /* MTU range: 81 - 9600 */ |
a047fbae | 5221 | netdev->min_mtu = 81; /* accommodate SACK */ |
d894be57 JW |
5222 | netdev->max_mtu = MAX_MTU; |
5223 | ||
b8ff05a9 | 5224 | netdev->netdev_ops = &cxgb4_netdev_ops; |
688848b1 AB |
5225 | #ifdef CONFIG_CHELSIO_T4_DCB |
5226 | netdev->dcbnl_ops = &cxgb4_dcb_ops; | |
5227 | cxgb4_dcb_state_init(netdev); | |
5228 | #endif | |
812034f1 | 5229 | cxgb4_set_ethtool_ops(netdev); |
b8ff05a9 DM |
5230 | } |
5231 | ||
ad75b7d3 RL |
5232 | cxgb4_init_ethtool_dump(adapter); |
5233 | ||
b8ff05a9 DM |
5234 | pci_set_drvdata(pdev, adapter); |
5235 | ||
5236 | if (adapter->flags & FW_OK) { | |
060e0c75 | 5237 | err = t4_port_init(adapter, func, func, 0); |
b8ff05a9 DM |
5238 | if (err) |
5239 | goto out_free_dev; | |
098ef6c2 HS |
5240 | } else if (adapter->params.nports == 1) { |
5241 | /* If we don't have a connection to the firmware -- possibly | |
5242 | * because of an error -- grab the raw VPD parameters so we | |
5243 | * can set the proper MAC Address on the debug network | |
5244 | * interface that we've created. | |
5245 | */ | |
5246 | u8 hw_addr[ETH_ALEN]; | |
5247 | u8 *na = adapter->params.vpd.na; | |
5248 | ||
5249 | err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd); | |
5250 | if (!err) { | |
5251 | for (i = 0; i < ETH_ALEN; i++) | |
5252 | hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 + | |
5253 | hex2val(na[2 * i + 1])); | |
5254 | t4_set_hw_addr(adapter, 0, hw_addr); | |
5255 | } | |
b8ff05a9 DM |
5256 | } |
5257 | ||
098ef6c2 | 5258 | /* Configure queues and allocate tables now, they can be needed as |
b8ff05a9 DM |
5259 | * soon as the first register_netdev completes. |
5260 | */ | |
5261 | cfg_queues(adapter); | |
5262 | ||
3bdb376e KS |
5263 | adapter->smt = t4_init_smt(); |
5264 | if (!adapter->smt) { | |
5265 | /* We tolerate a lack of SMT, giving up some functionality */ | |
5266 | dev_warn(&pdev->dev, "could not allocate SMT, continuing\n"); | |
5267 | } | |
5268 | ||
5be9ed8d | 5269 | adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); |
b8ff05a9 DM |
5270 | if (!adapter->l2t) { |
5271 | /* We tolerate a lack of L2T, giving up some functionality */ | |
5272 | dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); | |
5273 | adapter->params.offload = 0; | |
5274 | } | |
5275 | ||
b5a02f50 | 5276 | #if IS_ENABLED(CONFIG_IPV6) |
eb72f74f HS |
5277 | if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) && |
5278 | (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) { | |
5279 | /* CLIP functionality is not present in hardware, | |
5280 | * hence disable all offload features | |
b5a02f50 AB |
5281 | */ |
5282 | dev_warn(&pdev->dev, | |
eb72f74f | 5283 | "CLIP not enabled in hardware, continuing\n"); |
b5a02f50 | 5284 | adapter->params.offload = 0; |
eb72f74f HS |
5285 | } else { |
5286 | adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, | |
5287 | adapter->clipt_end); | |
5288 | if (!adapter->clipt) { | |
5289 | /* We tolerate a lack of clip_table, giving up | |
5290 | * some functionality | |
5291 | */ | |
5292 | dev_warn(&pdev->dev, | |
5293 | "could not allocate Clip table, continuing\n"); | |
5294 | adapter->params.offload = 0; | |
5295 | } | |
b5a02f50 AB |
5296 | } |
5297 | #endif | |
b72a32da RL |
5298 | |
5299 | for_each_port(adapter, i) { | |
5300 | pi = adap2pinfo(adapter, i); | |
5301 | pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls); | |
5302 | if (!pi->sched_tbl) | |
5303 | dev_warn(&pdev->dev, | |
5304 | "could not activate scheduling on port %d\n", | |
5305 | i); | |
5306 | } | |
5307 | ||
578b46b9 | 5308 | if (tid_init(&adapter->tids) < 0) { |
b8ff05a9 DM |
5309 | dev_warn(&pdev->dev, "could not allocate TID table, " |
5310 | "continuing\n"); | |
5311 | adapter->params.offload = 0; | |
d8931847 | 5312 | } else { |
45da1ca2 | 5313 | adapter->tc_u32 = cxgb4_init_tc_u32(adapter); |
d8931847 RL |
5314 | if (!adapter->tc_u32) |
5315 | dev_warn(&pdev->dev, | |
5316 | "could not offload tc u32, continuing\n"); | |
62488e4b | 5317 | |
79e6d46a KS |
5318 | if (cxgb4_init_tc_flower(adapter)) |
5319 | dev_warn(&pdev->dev, | |
5320 | "could not offload tc flower, continuing\n"); | |
b8ff05a9 DM |
5321 | } |
5322 | ||
5c31254e | 5323 | if (is_offload(adapter) || is_hashfilter(adapter)) { |
9a1bb9f6 HS |
5324 | if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { |
5325 | u32 hash_base, hash_reg; | |
5326 | ||
5327 | if (chip <= CHELSIO_T5) { | |
5328 | hash_reg = LE_DB_TID_HASHBASE_A; | |
5329 | hash_base = t4_read_reg(adapter, hash_reg); | |
5330 | adapter->tids.hash_base = hash_base / 4; | |
5331 | } else { | |
5332 | hash_reg = T6_LE_DB_HASH_TID_BASE_A; | |
5333 | hash_base = t4_read_reg(adapter, hash_reg); | |
5334 | adapter->tids.hash_base = hash_base; | |
5335 | } | |
5336 | } | |
5337 | } | |
5338 | ||
f7cabcdd DM |
5339 | /* See what interrupts we'll be using */ |
5340 | if (msi > 1 && enable_msix(adapter) == 0) | |
5341 | adapter->flags |= USING_MSIX; | |
94cdb8bb | 5342 | else if (msi > 0 && pci_enable_msi(pdev) == 0) { |
f7cabcdd | 5343 | adapter->flags |= USING_MSI; |
94cdb8bb HS |
5344 | if (msi > 1) |
5345 | free_msix_info(adapter); | |
5346 | } | |
f7cabcdd | 5347 | |
547fd272 HS |
5348 | /* check for PCI Express bandwidth capabilities */ | |
5349 | cxgb4_check_pcie_caps(adapter); | |
5350 | ||
671b0060 DM |
5351 | err = init_rss(adapter); |
5352 | if (err) | |
5353 | goto out_free_dev; | |
5354 | ||
b8ff05a9 DM |
5355 | /* |
5356 | * The card is now ready to go. If any errors occur during device | |
5357 | * registration we do not fail the whole card but rather proceed only | |
5358 | * with the ports we manage to register successfully. However we must | |
5359 | * register at least one net device. | |
5360 | */ | |
5361 | for_each_port(adapter, i) { | |
a57cabe0 | 5362 | pi = adap2pinfo(adapter, i); |
d2a007ab | 5363 | adapter->port[i]->dev_port = pi->lport; |
a57cabe0 DM |
5364 | netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); |
5365 | netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); | |
5366 | ||
b1a73af9 SM |
5367 | netif_carrier_off(adapter->port[i]); |
5368 | ||
b8ff05a9 DM |
5369 | err = register_netdev(adapter->port[i]); |
5370 | if (err) | |
b1a3c2b6 | 5371 | break; |
b1a3c2b6 DM |
5372 | adapter->chan_map[pi->tx_chan] = i; |
5373 | print_port_info(adapter->port[i]); | |
b8ff05a9 | 5374 | } |
b1a3c2b6 | 5375 | if (i == 0) { |
b8ff05a9 DM |
5376 | dev_err(&pdev->dev, "could not register any net devices\n"); |
5377 | goto out_free_dev; | |
5378 | } | |
b1a3c2b6 DM |
5379 | if (err) { |
5380 | dev_warn(&pdev->dev, "only %d net devices registered\n", i); | |
5381 | err = 0; | |
6403eab1 | 5382 | } |
b8ff05a9 DM |
5383 | |
5384 | if (cxgb4_debugfs_root) { | |
5385 | adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), | |
5386 | cxgb4_debugfs_root); | |
5387 | setup_debugfs(adapter); | |
5388 | } | |
5389 | ||
6482aa7c DLR |
5390 | /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ |
5391 | pdev->needs_freset = 1; | |
5392 | ||
0fbc81b3 HS |
5393 | if (is_uld(adapter)) { |
5394 | mutex_lock(&uld_mutex); | |
5395 | list_add_tail(&adapter->list_node, &adapter_list); | |
5396 | mutex_unlock(&uld_mutex); | |
5397 | } | |
b8ff05a9 | 5398 | |
9c33e420 AG |
5399 | if (!is_t4(adapter->params.chip)) |
5400 | cxgb4_ptp_init(adapter); | |
5401 | ||
0de72738 | 5402 | print_adapter_info(adapter); |
0fbc81b3 | 5403 | setup_fw_sge_queues(adapter); |
7829451c | 5404 | return 0; |
0de72738 | 5405 | |
8e1e6059 | 5406 | sriov: |
b8ff05a9 | 5407 | #ifdef CONFIG_PCI_IOV |
7829451c HS |
5408 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); |
5409 | if (!adapter) { | |
5410 | err = -ENOMEM; | |
5411 | goto free_pci_region; | |
5412 | } | |
5413 | ||
7829451c HS |
5414 | adapter->pdev = pdev; |
5415 | adapter->pdev_dev = &pdev->dev; | |
5416 | adapter->name = pci_name(pdev); | |
5417 | adapter->mbox = func; | |
5418 | adapter->pf = func; | |
5419 | adapter->regs = regs; | |
e7b48a32 | 5420 | adapter->adap_idx = adap_idx; |
7829451c HS |
5421 | adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + |
5422 | (sizeof(struct mbox_cmd) * | |
5423 | T4_OS_LOG_MBOX_CMDS), | |
5424 | GFP_KERNEL); | |
5425 | if (!adapter->mbox_log) { | |
5426 | err = -ENOMEM; | |
e7b48a32 | 5427 | goto free_adapter; |
7829451c | 5428 | } |
038c35a8 GG |
5429 | spin_lock_init(&adapter->mbox_lock); |
5430 | INIT_LIST_HEAD(&adapter->mlist.list); | |
96fe11f2 GG |
5431 | |
5432 | v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | | |
5433 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); | |
5434 | err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, | |
5435 | &v, &port_vec); | |
5436 | if (err < 0) { | |
5437 | dev_err(adapter->pdev_dev, "Could not fetch port params\n"); | |
d0417849 | 5438 | goto free_mbox_log; |
96fe11f2 GG |
5439 | } |
5440 | ||
5441 | adapter->params.nports = hweight32(port_vec); | |
7829451c | 5442 | pci_set_drvdata(pdev, adapter); |
7829451c HS |
5443 | return 0; |
5444 | ||
d0417849 GG |
5445 | free_mbox_log: |
5446 | kfree(adapter->mbox_log); | |
7829451c HS |
5447 | free_adapter: |
5448 | kfree(adapter); | |
5449 | free_pci_region: | |
5450 | iounmap(regs); | |
5451 | pci_disable_sriov(pdev); | |
5452 | pci_release_regions(pdev); | |
5453 | return err; | |
5454 | #else | |
b8ff05a9 | 5455 | return 0; |
7829451c | 5456 | #endif |
b8ff05a9 DM |
5457 | |
5458 | out_free_dev: | |
06546391 | 5459 | free_some_resources(adapter); |
94cdb8bb HS |
5460 | if (adapter->flags & USING_MSIX) |
5461 | free_msix_info(adapter); | |
0fbc81b3 HS |
5462 | if (adapter->num_uld || adapter->num_ofld_uld) |
5463 | t4_uld_mem_free(adapter); | |
b8ff05a9 | 5464 | out_unmap_bar: |
d14807dd | 5465 | if (!is_t4(adapter->params.chip)) |
22adfe0a | 5466 | iounmap(adapter->bar2); |
b8ff05a9 | 5467 | out_free_adapter: |
29aaee65 AB |
5468 | if (adapter->workq) |
5469 | destroy_workqueue(adapter->workq); | |
5470 | ||
7f080c3f | 5471 | kfree(adapter->mbox_log); |
b8ff05a9 | 5472 | kfree(adapter); |
d6ce2628 HS |
5473 | out_unmap_bar0: |
5474 | iounmap(regs); | |
b8ff05a9 DM |
5475 | out_disable_device: |
5476 | pci_disable_pcie_error_reporting(pdev); | |
5477 | pci_disable_device(pdev); | |
5478 | out_release_regions: | |
5479 | pci_release_regions(pdev); | |
b8ff05a9 DM |
5480 | return err; |
5481 | } | |
5482 | ||
91744948 | 5483 | static void remove_one(struct pci_dev *pdev) |
b8ff05a9 DM |
5484 | { |
5485 | struct adapter *adapter = pci_get_drvdata(pdev); | |
5486 | ||
7829451c HS |
5487 | if (!adapter) { |
5488 | pci_release_regions(pdev); | |
5489 | return; | |
5490 | } | |
636f9d37 | 5491 | |
e1f6198e GG |
5492 | adapter->flags |= SHUTTING_DOWN; |
5493 | ||
7829451c | 5494 | if (adapter->pf == 4) { |
b8ff05a9 DM |
5495 | int i; |
5496 | ||
29aaee65 AB |
5497 | /* Tear down per-adapter Work Queue first since it can contain |
5498 | * references to our adapter data structure. | |
5499 | */ | |
5500 | destroy_workqueue(adapter->workq); | |
5501 | ||
6a146f3a | 5502 | if (is_uld(adapter)) { |
b8ff05a9 | 5503 | detach_ulds(adapter); |
6a146f3a GP |
5504 | t4_uld_clean_up(adapter); |
5505 | } | |
b8ff05a9 | 5506 | |
b37987e8 HS |
5507 | disable_interrupts(adapter); |
5508 | ||
b8ff05a9 | 5509 | for_each_port(adapter, i) |
8f3a7676 | 5510 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
b8ff05a9 DM |
5511 | unregister_netdev(adapter->port[i]); |
5512 | ||
9f16dc2e | 5513 | debugfs_remove_recursive(adapter->debugfs_root); |
b8ff05a9 | 5514 | |
9c33e420 AG |
5515 | if (!is_t4(adapter->params.chip)) |
5516 | cxgb4_ptp_stop(adapter); | |
5517 | ||
f2b7e78d VP |
5518 | /* If we allocated filters, free up state associated with any |
5519 | * valid filters ... | |
5520 | */ | |
578b46b9 | 5521 | clear_all_filters(adapter); |
f2b7e78d | 5522 | |
aaefae9b DM |
5523 | if (adapter->flags & FULL_INIT_DONE) |
5524 | cxgb_down(adapter); | |
b8ff05a9 | 5525 | |
94cdb8bb HS |
5526 | if (adapter->flags & USING_MSIX) |
5527 | free_msix_info(adapter); | |
0fbc81b3 HS |
5528 | if (adapter->num_uld || adapter->num_ofld_uld) |
5529 | t4_uld_mem_free(adapter); | |
06546391 | 5530 | free_some_resources(adapter); |
b5a02f50 AB |
5531 | #if IS_ENABLED(CONFIG_IPV6) |
5532 | t4_cleanup_clip_tbl(adapter); | |
5533 | #endif | |
b8ff05a9 | 5534 | iounmap(adapter->regs); |
d14807dd | 5535 | if (!is_t4(adapter->params.chip)) |
22adfe0a | 5536 | iounmap(adapter->bar2); |
b8ff05a9 | 5537 | pci_disable_pcie_error_reporting(pdev); |
144be3d9 GS |
5538 | if ((adapter->flags & DEV_ENABLED)) { |
5539 | pci_disable_device(pdev); | |
5540 | adapter->flags &= ~DEV_ENABLED; | |
5541 | } | |
b8ff05a9 | 5542 | pci_release_regions(pdev); |
7f080c3f | 5543 | kfree(adapter->mbox_log); |
ee9a33b2 | 5544 | synchronize_rcu(); |
8b662fe7 | 5545 | kfree(adapter); |
7829451c HS |
5546 | } |
5547 | #ifdef CONFIG_PCI_IOV | |
5548 | else { | |
e7b48a32 | 5549 | if (adapter->port[0]) |
7829451c | 5550 | unregister_netdev(adapter->port[0]); |
7829451c | 5551 | iounmap(adapter->regs); |
661dbeb9 | 5552 | kfree(adapter->vfinfo); |
d0417849 | 5553 | kfree(adapter->mbox_log); |
7829451c HS |
5554 | kfree(adapter); |
5555 | pci_disable_sriov(pdev); | |
b8ff05a9 | 5556 | pci_release_regions(pdev); |
7829451c HS |
5557 | } |
5558 | #endif | |
b8ff05a9 DM |
5559 | } |
5560 | ||
0fbc81b3 HS |
5561 | /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt |
5562 | * delivery. This is essentially a stripped down version of the PCI remove() | |
5563 | * function where we do the minimal amount of work necessary to shut down any | |
5564 | * further activity. | |
5565 | */ | |
5566 | static void shutdown_one(struct pci_dev *pdev) | |
5567 | { | |
5568 | struct adapter *adapter = pci_get_drvdata(pdev); | |
5569 | ||
5570 | /* As with remove_one() above (see extended comment), we only want to | |
5571 | * do cleanup on PCI Devices which went all the way through init_one() | |
5572 | * ... | |
5573 | */ | |
5574 | if (!adapter) { | |
5575 | pci_release_regions(pdev); | |
5576 | return; | |
5577 | } | |
5578 | ||
e1f6198e GG |
5579 | adapter->flags |= SHUTTING_DOWN; |
5580 | ||
0fbc81b3 HS |
5581 | if (adapter->pf == 4) { |
5582 | int i; | |
5583 | ||
5584 | for_each_port(adapter, i) | |
5585 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) | |
5586 | cxgb_close(adapter->port[i]); | |
5587 | ||
6a146f3a GP |
5588 | if (is_uld(adapter)) { |
5589 | detach_ulds(adapter); | |
5590 | t4_uld_clean_up(adapter); | |
5591 | } | |
5592 | ||
0fbc81b3 HS |
5593 | disable_interrupts(adapter); |
5594 | disable_msi(adapter); | |
5595 | ||
5596 | t4_sge_stop(adapter); | |
5597 | if (adapter->flags & FW_OK) | |
5598 | t4_fw_bye(adapter, adapter->mbox); | |
5599 | } | |
5600 | #ifdef CONFIG_PCI_IOV | |
5601 | else { | |
5602 | if (adapter->port[0]) | |
5603 | unregister_netdev(adapter->port[0]); | |
5604 | iounmap(adapter->regs); | |
5605 | kfree(adapter->vfinfo); | |
d0417849 | 5606 | kfree(adapter->mbox_log); |
0fbc81b3 HS |
5607 | kfree(adapter); |
5608 | pci_disable_sriov(pdev); | |
5609 | pci_release_regions(pdev); | |
5610 | } | |
5611 | #endif | |
5612 | } | |
5613 | ||
b8ff05a9 DM |
5614 | static struct pci_driver cxgb4_driver = { |
5615 | .name = KBUILD_MODNAME, | |
5616 | .id_table = cxgb4_pci_tbl, | |
5617 | .probe = init_one, | |
91744948 | 5618 | .remove = remove_one, |
0fbc81b3 | 5619 | .shutdown = shutdown_one, |
b6244201 HS |
5620 | #ifdef CONFIG_PCI_IOV |
5621 | .sriov_configure = cxgb4_iov_configure, | |
5622 | #endif | |
204dc3c0 | 5623 | .err_handler = &cxgb4_eeh, |
b8ff05a9 DM |
5624 | }; |
5625 | ||
5626 | static int __init cxgb4_init_module(void) | |
5627 | { | |
5628 | int ret; | |
5629 | ||
5630 | /* Debugfs support is optional, just warn if this fails */ | |
5631 | cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | |
5632 | if (!cxgb4_debugfs_root) | |
428ac43f | 5633 | pr_warn("could not create debugfs entry, continuing\n"); |
b8ff05a9 DM |
5634 | |
5635 | ret = pci_register_driver(&cxgb4_driver); | |
29aaee65 | 5636 | if (ret < 0) |
b8ff05a9 | 5637 | debugfs_remove(cxgb4_debugfs_root); |
01bcca68 | 5638 | |
1bb60376 | 5639 | #if IS_ENABLED(CONFIG_IPV6) |
b5a02f50 AB |
5640 | if (!inet6addr_registered) { |
5641 | register_inet6addr_notifier(&cxgb4_inet6addr_notifier); | |
5642 | inet6addr_registered = true; | |
5643 | } | |
1bb60376 | 5644 | #endif |
01bcca68 | 5645 | |
b8ff05a9 DM |
5646 | return ret; |
5647 | } | |
5648 | ||
5649 | static void __exit cxgb4_cleanup_module(void) | |
5650 | { | |
1bb60376 | 5651 | #if IS_ENABLED(CONFIG_IPV6) |
1793c798 | 5652 | if (inet6addr_registered) { |
b5a02f50 AB |
5653 | unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); |
5654 | inet6addr_registered = false; | |
5655 | } | |
1bb60376 | 5656 | #endif |
b8ff05a9 DM |
5657 | pci_unregister_driver(&cxgb4_driver); |
5658 | debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ | |
5659 | } | |
5660 | ||
5661 | module_init(cxgb4_init_module); | |
5662 | module_exit(cxgb4_cleanup_module); |