/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
	{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 *     msi = 2: choose from among all three options
 *     msi = 1: only consider MSI and INTx interrupts
 *     msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
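/* Worked example of the alignment argument above: an Ethernet header is
 * 14 bytes, so with rx_dma_offset = 2 the IP header begins at byte
 * 2 + 14 = 16 of the buffer, which is 4-byte aligned; with an offset of 0
 * it would begin at byte 14 and every 4-byte IP header field would straddle
 * a 4-byte boundary.
 */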
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 50000:
			s = "50Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}
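/* Illustration of the mapping programmed above: with DCB enabled, Port TX
 * Queue 0 carries DCB Priority 0, Queue 1 carries Priority 1, and so on;
 * on disable every queue is written with 0xffffffff ("no priority").
 */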
int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	/* idx stores the index of allocated filters,
	 * its size should be modified based on the number of
	 * MAC addresses that we allocate filters for
	 */

	u16 idx[1] = {};
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
				   idx, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/**
 *	cxgb4_change_mac - Update match filter for a MAC address.
 *	@pi: the port_info
 *	@viid: the VI id
 *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
 *		   or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: the destination to store the new SMT index
 *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0. In the latter case the address is added persistently
 *	if @persist is %true.
 *	Addresses are programmed to the hash region if the TCAM runs out of
 *	entries.
 */
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr, bool persist,
		     u8 *smt_idx)
{
	struct adapter *adapter = pi->adapter;
	struct hash_mac_addr *entry, *new_entry;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, viid,
			    *tcam_idx, addr, persist, smt_idx);
	/* We ran out of TCAM entries. try programming hash region. */
	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it from the list
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4_set_addr_hash(pi);
	} else if (ret >= 0) {
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0)
		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
					    dev->dev_addr, true, &pi->smt_idx);
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			t4_sge_eth_txq_egress_update(q->adap, eq, -1);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
			  ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
			  : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
			       & FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & CXGB4_USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSIX;
	} else if (adapter->flags & CXGB4_USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & CXGB4_MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}
int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
		       cpumask_var_t *aff_mask, int idx)
{
	int rv;

	if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
		dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
			*aff_mask);

	rv = irq_set_affinity_hint(vec, *aff_mask);
	if (rv)
		dev_warn(adap->pdev_dev,
			 "irq_set_affinity_hint %u failed %d\n",
			 vec, rv);

	return 0;
}

void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
{
	irq_set_affinity_hint(vec, NULL);
	free_cpumask_var(aff_mask);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		minfo = &adap->msix_info[msi_index];
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, ethqidx);
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0) {
		msi_index--;
		minfo = &adap->msix_info[msi_index];
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
	}
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int i, msi_index = 2;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i) {
		minfo = &adap->msix_info[msi_index++];
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[i].rspq);
	}
}
static int setup_ppod_edram(struct adapter *adap)
{
	unsigned int param, val;
	int ret = 0;

	/* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
	 * if firmware supports ppod edram feature or not. If firmware
	 * returns 1, then driver can enable this feature by sending
	 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to
	 * enable ppod edram feature.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));

	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_warn(adap->pdev_dev,
			 "querying PPOD_EDRAM support failed: %d\n",
			 ret);
		return -1;
	}

	if (val != 1)
		return -1;

	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_err(adap->pdev_dev,
			"setting PPOD_EDRAM failed: %d\n", ret);
		return -1;
	}
	return 0;
}
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);

	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & CXGB4_USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & CXGB4_USING_MSIX)
		adap->msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err)
		t4_free_sge_resources(adap);
	return err;
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler, NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}

		q = &s->ethrxq[pi->first_qset];
		for (j = 0; j < pi->nqsets; j++, t++, q++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					q->rspq.cntxt_id,
					!!(adap->flags & CXGB4_SGE_DBQ_TIMER));
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0)
					   , s->fw_evtq.cntxt_id, false);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
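/* Example of the DCB path above: a packet whose VLAN TCI is 0xa005 has
 * Priority Code Point (0xa005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT =
 * 0xa000 >> 13 = 5, so it is steered to TX Queue 5.
 */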
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
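/* Usage sketch (hypothetical values): to have a response queue interrupt
 * after roughly 5us or 8 queued packets, whichever comes first:
 *
 *	err = cxgb4_set_rspq_intr_params(q, 5, 8);
 *
 * Both values are snapped to the nearest entries of the adapter's
 * timer_val[] and counter_val[] tables by closest_timer()/closest_thres().
 */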
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
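/* Note on the IPv6 branch above: bitmap_find_free_region(..., 1) reserves
 * a naturally aligned region of order 1, i.e. two consecutive stids, which
 * is why the counters are bumped by 2 for PF_INET6 while an IPv4 server
 * consumes a single bit.
 */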
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
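/* The pointer tagging above relies on tid_tab entries being at least
 * 4-byte aligned, which leaves the low 2 bits of &t->tid_tab[tid] free to
 * carry the Tx channel (0-3); process_tid_release_list() recovers the
 * channel with "(uintptr_t)p & 3" and then masks the pointer back.
 */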
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
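/* Usage sketch (hypothetical caller, illustrative values only): an upper
 * layer driver creating a listener on 0.0.0.0:80 that steers incoming
 * SYNs to ingress queue 0:
 *
 *	ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
 *				  htons(80), 0, 0);
 *	if (ret < 0)
 *		return ret;
 */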
/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
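/* Example (illustrative table values): if the HW table contains
 * {..., 1280, 1488, 1500, ...}, cxgb4_best_mtu(mtus, 1400, &idx) selects
 * 1280, the largest entry not exceeding 1400, while any request below
 * mtus[0] still returns mtus[0].
 */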
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
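/* Worked example of the alignment test (illustrative numbers): with
 * header_size = 52 and data_size_align = 8, an MTU of 1500 yields a data
 * segment of 1500 - 52 = 1448 bytes and 1448 & 7 == 0, so its index is
 * remembered in aligned_mtu_idx and preferred over the closest MTU as long
 * as it is at most one table slot away.
 */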
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/**
 *	cxgb4_port_e2cchan - get the HW c-channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW RX c-channel of the given port.
 */
unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->rx_cchan;
}
EXPORT_SYMBOL(cxgb4_port_e2cchan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		return ret;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
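/* Example of the wrap-around delta above (illustrative numbers): for a
 * 1024-entry queue with hw_pidx = 1020 and a desired pidx of 4, the
 * doorbell is rung with delta = 1024 - 1020 + 4 = 8 credits instead of a
 * negative difference.
 */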
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}
static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
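/* t4_db_full() and t4_db_dropped() are the entry points invoked when the
 * SGE reports a doorbell FIFO full or dropped-doorbell condition; they mask
 * the condition and defer the actual recovery to the workqueue tasks above.
 */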
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}
void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & CXGB4_USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & CXGB4_USING_MSI) ? 0
								  : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= CXGB4_FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	return 0;

 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
 rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);

	adapter->flags &= ~CXGB4_FULL_INIT_DONE;
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4_update_port_info(pi);
	if (err < 0)
		return err;

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	ret = t4_enable_pi_params(adapter, adapter->pf, pi,
				  false, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
	cxgb4_dcb_reset(dev);
	dcb_tx_queue_prio_enable(dev, false);
#endif
	return ret;
}
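/* Server filters live in a reserved region at the top of the filter table,
 * so an incoming server TID (stid) is first rebased into a filter index:
 * ftid = stid - sftid_base + nftids.  E.g. with sftid_base = 500 and
 * nftids = 496, stid 500 maps to filter index 496.
 */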
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport  = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_dropped	     = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
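/* Driver ioctl handler: supports the MII register access ioctls
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG, with both Clause 22 and Clause 45
 * addressing) and the hardware timestamping ioctls
 * (SIOCGHWTSTAMP/SIOCSHWTSTAMP).
 */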
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		if (!is_t4(adapter->params.chip)) {
			switch (pi->tstamp_config.tx_type) {
			case HWTSTAMP_TX_OFF:
			case HWTSTAMP_TX_ON:
				break;
			default:
				return -ERANGE;
			}

			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L4);
				break;
			case HWTSTAMP_FILTER_PTP_V2_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L2_L4);
				break;
			case HWTSTAMP_FILTER_ALL:
			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}

			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
			    (pi->tstamp_config.rx_filter ==
				HWTSTAMP_FILTER_NONE)) {
				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
					pi->ptp_enable = false;
			}

			if (pi->tstamp_config.rx_filter !=
				HWTSTAMP_FILTER_NONE) {
				if (cxgb4_ptp_redirect_rx_packet(adapter,
								 pi) >= 0)
					pi->ptp_enable = true;
			}
		} else {
			/* For T4 Adapters */
			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_ALL:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}
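/* The per-VF station MAC addresses are derived from the adapter's VPD
 * network address ("na"): the leading bytes are mixed together and marked
 * as a locally-administered unicast address, and the final byte encodes
 * pf * totalvfs + vf so each VF gets a unique address.
 */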
/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	unsigned int i, vf, nvfs;
	u16 a, b;
	int err;
	u8 *na;

	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
							    PCI_CAP_ID_VPD);
	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (err)
		return;

	na = adap->params.vpd.na;
	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
			      hex2val(na[2 * i + 1]));

	a = (hw_addr[0] << 8) | hw_addr[1];
	b = (hw_addr[1] << 8) | hw_addr[2];
	a ^= b;
	a |= 0x0200;	/* locally assigned Ethernet MAC address */
	a &= ~0x0100;	/* not a multicast Ethernet MAC address */
	macaddr[0] = a >> 8;
	macaddr[1] = a & 0xff;

	for (i = 2; i < 5; i++)
		macaddr[i] = hw_addr[i + 1];

	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
	     vf < nvfs; vf++) {
		macaddr[5] = adap->pf * nvfs + vf;
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
	}
}
static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}
static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
				    int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct vf_info *vfinfo;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	vfinfo = &adap->vfinfo[vf];

	ivi->vf = vf;
	ivi->max_tx_rate = vfinfo->tx_rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
	ivi->vlan = vfinfo->vlan;
	ivi->linkstate = vfinfo->link_state;
	return 0;
}
static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}
static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
				  int min_tx_rate, int max_tx_rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int link_ok, speed, mtu;
	u32 fw_pfvf, fw_class;
	int class_id = vf;
	int ret;
	u16 pktsize;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	if (min_tx_rate) {
		dev_err(adap->pdev_dev,
			"Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
			min_tx_rate, vf);
		return -EINVAL;
	}

	if (max_tx_rate == 0) {
		/* unbind VF to to any Traffic Class */
		fw_pfvf =
		    (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
		fw_class = 0xffffffff;
		ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
				    &fw_pfvf, &fw_class);
		if (ret) {
			dev_err(adap->pdev_dev,
				"Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
				ret, adap->pf, vf);
			return -EINVAL;
		}
		dev_info(adap->pdev_dev,
			 "PF %d VF %d is unbound from TX Rate Limiting\n",
			 adap->pf, vf);
		adap->vfinfo[vf].tx_rate = 0;
		return 0;
	}

	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
	if (ret != FW_SUCCESS) {
		dev_err(adap->pdev_dev,
			"Failed to get link information for VF %d\n", vf);
		return -EINVAL;
	}

	if (!link_ok) {
		dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
		return -EINVAL;
	}

	if (max_tx_rate > speed) {
		dev_err(adap->pdev_dev,
			"Max tx rate %d for VF %d can't be > link-speed %u",
			max_tx_rate, vf, speed);
		return -EINVAL;
	}

	pktsize = mtu;
	/* subtract ethhdr size and 4 bytes crc since, f/w appends it */
	pktsize = pktsize - sizeof(struct ethhdr) - 4;
	/* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
	pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
	/* configure Traffic Class for rate-limiting */
	ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
			      SCHED_CLASS_LEVEL_CL_RL,
			      SCHED_CLASS_MODE_CLASS,
			      SCHED_CLASS_RATEUNIT_BITS,
			      SCHED_CLASS_RATEMODE_ABS,
			      pi->tx_chan, class_id, 0,
			      max_tx_rate * 1000, 0, pktsize);
	if (ret) {
		dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
			ret);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev,
		 "Class %d with MSS %u configured with rate %u\n",
		 class_id, pktsize, max_tx_rate);

	/* bind VF to configured Traffic Class */
	fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
	fw_class = class_id;
	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
			    &fw_class);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Err %d in binding PF %d VF %d to Traffic Class %d\n",
			ret, adap->pf, vf, class_id);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
		 adap->pf, vf, class_id);
	adap->vfinfo[vf].tx_rate = max_tx_rate;
	return 0;
}
static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
				  u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
		return -EPROTONOSUPPORT;

	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
	if (!ret) {
		adap->vfinfo[vf].vlan = vlan;
		return 0;
	}

	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
	return ret;
}
static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
					int link)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	u32 param, val;
	int ret = 0;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		val = FW_VF_LINK_STATE_AUTO;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		val = FW_VF_LINK_STATE_ENABLE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		val = FW_VF_LINK_STATE_DISABLE;
		break;

	default:
		return -EINVAL;
	}

	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
			    &param, &val);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Error %d in setting PF %d VF %d link state\n",
			ret, adap->pf, vf);
		return -EINVAL;
	}

	adap->vfinfo[vf].link_state = link;
	return ret;
}
#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
				    addr->sa_data, true, &pi->smt_idx);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & CXGB4_USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
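/* ndo_set_tx_maxrate: rate-limit a single Tx queue.  The flow is to unbind
 * the queue from any existing scheduling class, allocate (or reuse) a class
 * whose parameters match the requested rate, and then bind the queue to it.
 * The rate argument is in Mbps and is converted to Kbps for the firmware.
 */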
static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct ch_sched_params p;
	struct ch_sched_queue qe;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate * 1000;

	/* Max rate is 100 Gbps */
	if (req_rate > SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
			rate, SCHED_MAX_RATE_KBPS / 1000);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d fail. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	memset(&p, 0, sizeof(p));
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel  = pi->tx_chan;
	p.u.params.class    = SCHED_CLS_NONE;
	p.u.params.minrate  = 0;
	p.u.params.maxrate  = req_rate;
	p.u.params.weight   = 0;
	p.u.params.pktsize  = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
static int cxgb_setup_tc_flower(struct net_device *dev,
				struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return cxgb4_tc_flower_replace(dev, cls_flower);
	case FLOW_CLS_DESTROY:
		return cxgb4_tc_flower_destroy(dev, cls_flower);
	case FLOW_CLS_STATS:
		return cxgb4_tc_flower_stats(dev, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
{
	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return cxgb4_config_knode(dev, cls_u32);
	case TC_CLSU32_DELETE_KNODE:
		return cxgb4_delete_knode(dev, cls_u32);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSU32:
		return cxgb_setup_tc_cls_u32(dev, type_data);
	case TC_SETUP_CLSFLOWER:
		return cxgb_setup_tc_flower(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static LIST_HEAD(cxgb_block_cb_list);

static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	struct port_info *pi = netdev2pinfo(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &cxgb_block_cb_list,
						  cxgb_setup_tc_block_cb,
						  pi, dev, true);
	default:
		return -EOPNOTSUPP;
	}
}
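/* UDP tunnel (VXLAN/GENEVE) offload: T6 and later can offload checksum and
 * TSO for encapsulated traffic, but only one VXLAN port and one GENEVE port
 * are supported at a time; the port counts below reference-count repeat
 * adds of the same port (e.g. for IPv4 and IPv6).
 */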
static void cxgb_del_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int ret = 0, i;

	if (chip_ver < CHELSIO_T6)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!adapter->vxlan_port_cnt ||
		    adapter->vxlan_port != ti->port)
			return; /* Invalid VxLAN destination port */

		adapter->vxlan_port_cnt--;
		if (adapter->vxlan_port_cnt)
			return;

		adapter->vxlan_port = 0;
		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!adapter->geneve_port_cnt ||
		    adapter->geneve_port != ti->port)
			return; /* Invalid GENEVE destination port */

		adapter->geneve_port_cnt--;
		if (adapter->geneve_port_cnt)
			return;

		adapter->geneve_port = 0;
		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
		break;
	default:
		return;
	}

	/* Matchall mac entries can be deleted only after all tunnel ports
	 * are brought down or removed.
	 */
	if (!adapter->rawf_cnt)
		return;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		ret = t4_free_raw_mac_filt(adapter, pi->viid,
					   match_all_mac, match_all_mac,
					   adapter->rawf_start +
					   pi->port_id,
					   1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
				    i);
			return;
		}
	}
}
static void cxgb_add_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int i, ret;

	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* Callback for adding vxlan port can be called with the same
		 * port for both IPv4 and IPv6. We should not disable the
		 * offloading when the same port for both protocols is added
		 * and later one of them is removed.
		 */
		if (adapter->vxlan_port_cnt &&
		    adapter->vxlan_port == ti->port) {
			adapter->vxlan_port_cnt++;
			return;
		}

		/* We will support only one VxLAN port */
		if (adapter->vxlan_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->vxlan_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->vxlan_port = ti->port;
		adapter->vxlan_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (adapter->geneve_port_cnt &&
		    adapter->geneve_port == ti->port) {
			adapter->geneve_port_cnt++;
			return;
		}

		/* We will support only one GENEVE port */
		if (adapter->geneve_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->geneve_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->geneve_port = ti->port;
		adapter->geneve_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
		break;
	default:
		return;
	}

	/* Create a 'match all' mac filter entry for inner mac,
	 * if raw mac interface is supported. Once the linux kernel provides
	 * driver entry points for adding/deleting the inner mac addresses,
	 * we will remove this 'match all' entry and fallback to adding
	 * exact match filters.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);

		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
					    match_all_mac,
					    match_all_mac,
					    adapter->rawf_start +
					    pi->port_id,
					    1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
				    be16_to_cpu(ti->port));
			cxgb_del_udp_tunnel(netdev, ti);
			return;
		}
	}
}
static netdev_features_t cxgb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return features;

	/* Check if hw supports offload for this packet */
	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
		return features;

	/* Offload is not supported for this encapsulated packet */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* Disable GRO, if RX_CSUM is disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO;

	return features;
}
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_start_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
	.ndo_features_check   = cxgb_features_check,
	.ndo_fix_features     = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open               = cxgb4_mgmt_open,
	.ndo_set_vf_mac         = cxgb4_mgmt_set_vf_mac,
	.ndo_get_vf_config      = cxgb4_mgmt_get_vf_config,
	.ndo_set_vf_rate        = cxgb4_mgmt_set_vf_rate,
	.ndo_get_phys_port_id   = cxgb4_mgmt_get_phys_port_id,
	.ndo_set_vf_vlan        = cxgb4_mgmt_set_vf_vlan,
	.ndo_set_vf_link_state  = cxgb4_mgmt_set_vf_link_state,
};
#endif
static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->version, cxgb4_driver_version,
		sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
static void notify_fatal_err(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, fatal_err_notify_task);
	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
}
void t4_fatal_err(struct adapter *adap)
{
	int port;

	if (pci_channel_offline(adap->pdev))
		return;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter.  RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
	queue_work(adap->workq, &adap->fatal_err_notify_task);
}
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
/* HMA Definitions */

/* The maximum number of address that can be send in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD	5

#define HMA_PAGE_SIZE		PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */

#define HMA_PAGE_ORDER					\
	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
	 ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration(in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE	1
#define HMA_MAX_TOTAL_SIZE				\
	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
	  HMA_MAX_NO_FW_ADDRESS) >> 20)
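/* Worked example: with 4KB pages, HMA_PAGE_ORDER = ilog2(16K/4K) = 2, so
 * each firmware address covers a 16KB chunk and HMA_MAX_TOTAL_SIZE =
 * (16KB * 16K) >> 20 = 256MB.
 */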
static void adap_free_hma_mem(struct adapter *adapter)
{
	struct scatterlist *iter;
	struct page *page = NULL;
	int i;

	if (!adapter->hma.sgt)
		return;

	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
	}

	for_each_sg(adapter->hma.sgt->sgl, iter,
		    adapter->hma.sgt->orig_nents, i) {
		page = sg_page(iter);
		if (page)
			__free_pages(page, HMA_PAGE_ORDER);
	}

	kfree(adapter->hma.phy_addr);
	sg_free_table(adapter->hma.sgt);
	kfree(adapter->hma.sgt);
	adapter->hma.sgt = NULL;
}
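/* Allocate host memory for HMA (Host Memory Access), map it for DMA, and
 * hand the page addresses to the firmware in FW_HMA_CMD chunks of at most
 * HMA_MAX_ADDR_IN_CMD addresses, marking the first and last commands with
 * the SOC/EOC flags.
 */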
static int adap_config_hma(struct adapter *adapter)
{
	struct scatterlist *sgl, *iter;
	struct sg_table *sgt;
	struct page *newpage;
	unsigned int i, j, k;
	u32 param, hma_size;
	unsigned int ncmds;
	size_t page_size;
	u32 page_order;
	int node, ret;

	/* HMA is supported only for T6+ cards.
	 * Avoid initializing HMA in kdump kernels.
	 */
	if (is_kdump_kernel() ||
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return 0;

	/* Get the HMA region size required by fw */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &param, &hma_size);
	/* An error means card has its own memory or HMA is not supported by
	 * the firmware. Return without any errors.
	 */
	if (ret || !hma_size)
		return 0;

	if (hma_size < HMA_MIN_TOTAL_SIZE ||
	    hma_size > HMA_MAX_TOTAL_SIZE) {
		dev_err(adapter->pdev_dev,
			"HMA size %uMB beyond bounds(%u-%lu)MB\n",
			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
		return -EINVAL;
	}

	page_size = HMA_PAGE_SIZE;
	page_order = HMA_PAGE_ORDER;
	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
	if (unlikely(!adapter->hma.sgt)) {
		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
		return -ENOMEM;
	}
	sgt = adapter->hma.sgt;
	/* FW returned value will be in MB's
	 */
	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
		kfree(adapter->hma.sgt);
		adapter->hma.sgt = NULL;
		return -ENOMEM;
	}

	sgl = adapter->hma.sgt->sgl;
	node = dev_to_node(adapter->pdev_dev);
	for_each_sg(sgl, iter, sgt->orig_nents, i) {
		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
					   __GFP_ZERO, page_order);
		if (!newpage) {
			dev_err(adapter->pdev_dev,
				"Not enough memory for HMA page allocation\n");
			ret = -ENOMEM;
			goto free_hma;
		}
		sg_set_page(iter, newpage, page_size << page_order, 0);
	}

	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
				PCI_DMA_BIDIRECTIONAL);
	if (!sgt->nents) {
		dev_err(adapter->pdev_dev,
			"Not enough memory for HMA DMA mapping");
		ret = -ENOMEM;
		goto free_hma;
	}
	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;

	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
					GFP_KERNEL);
	if (unlikely(!adapter->hma.phy_addr))
		goto free_hma;

	for_each_sg(sgl, iter, sgt->nents, i) {
		newpage = sg_page(iter);
		adapter->hma.phy_addr[i] = sg_dma_address(iter);
	}

	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
	/* Pass on the addresses to firmware */
	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
		struct fw_hma_cmd hma_cmd;
		u8 naddr = HMA_MAX_ADDR_IN_CMD;
		u8 soc = 0, eoc = 0;
		u8 hma_mode = 1; /* Presently we support only Page table mode */

		soc = (i == 0) ? 1 : 0;
		eoc = (i == ncmds - 1) ? 1 : 0;

		/* For last cmd, set naddr corresponding to remaining
		 * addresses
		 */
		if (i == ncmds - 1) {
			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
		}
		memset(&hma_cmd, 0, sizeof(hma_cmd));
		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));

		hma_cmd.mode_to_pcie_params =
			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));

		/* HMA cmd size specified in MB's */
		hma_cmd.naddr_size =
			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
			      FW_HMA_CMD_NADDR_V(naddr));

		/* Total Page size specified in units of 4K */
		hma_cmd.addr_size_pkd =
			htonl(FW_HMA_CMD_ADDR_SIZE_V
				((page_size << page_order) >> 12));

		/* Fill the 5 addresses */
		for (j = 0; j < naddr; j++) {
			hma_cmd.phy_address[j] =
				cpu_to_be64(adapter->hma.phy_addr[j + k]);
		}
		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
				 sizeof(hma_cmd), &hma_cmd);
		if (ret) {
			dev_err(adapter->pdev_dev,
				"HMA FW command failed with err %d\n", ret);
			goto free_hma;
		}
	}

	if (!ret)
		dev_info(adapter->pdev_dev,
			 "Reserved %uMB host memory for HMA\n", hma_size);
	return ret;

free_hma:
	adap_free_hma_mem(adapter);
	return ret;
}
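/* adap_init1: second-stage init after the firmware has been configured.
 * Reads and enables the device capabilities, sets up global RSS and PF/VF
 * resource limits, and applies a handful of TP register tweaks before
 * telling the firmware to begin operation via t4_early_init().
 */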
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* Now that we've successfully configured and initialized the adapter
	 * can ask the Firmware what resources it has provisioned for us.
	 */
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Unable to retrieve resource provisioning information\n");
		return ret;
	}

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/* Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}
{
3875 unsigned int phy_fw_id
; /* PCI Device ID */
3876 char *phy_fw_file
; /* /lib/firmware/ PHY Firmware file */
3877 int (*phy_fw_version
)(const u8
*phy_fw_data
, size_t phy_fw_size
);
3878 int phy_flash
; /* Has FLASH for PHY Firmware */
3879 } phy_info_array
[] = {
3881 PHY_AQ1202_DEVICEID
,
3882 PHY_AQ1202_FIRMWARE
,
3887 PHY_BCM84834_DEVICEID
,
3888 PHY_BCM84834_FIRMWARE
,
3895 static struct info_10gbt_phy_fw
*find_phy_info(int devid
)
3899 for (i
= 0; i
< ARRAY_SIZE(phy_info_array
); i
++) {
3900 if (phy_info_array
[i
].phy_fw_id
== devid
)
3901 return &phy_info_array
[i
];
/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with, on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	char *fw_config_file, fw_config_file_path[256];
	u32 finiver, finicsum, cfcsum, param, val;
	struct fw_caps_config_cmd caps_cmd;
	unsigned long mtype = 0, maddr = 0;
	const struct firmware *cf;
	char *config_name = NULL;
	int config_issued = 0;
	int ret;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	val = 0;

	/* Ofld + Hash filter is supported. Older fw will fail this request and
	 * it is fine.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);

	/* FW doesn't know about Hash filter + ofld support,
	 * it's not a problem, don't return an error.
	 */
	if (ret < 0) {
		dev_warn(adapter->pdev_dev,
			 "Hash filter with ofld is not supported by FW\n");
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/* We will proceed even if HMA init fails. */
	ret = adap_config_hma(adapter);
	if (ret)
		dev_err(adapter->pdev_dev,
			"HMA configuration failed with error %d\n", ret);

	if (is_t6(adapter->params.chip)) {
		ret = setup_ppod_edram(adapter);
		if (!ret)
			dev_info(adapter->pdev_dev, "Successfully enabled "
				 "ppod edram feature\n");
	}

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}

};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
4287 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4289 static int adap_init0(struct adapter
*adap
)
4293 enum dev_state state
;
4294 u32 params
[7], val
[7];
4295 struct fw_caps_config_cmd caps_cmd
;
4298 /* Grab Firmware Device Log parameters as early as possible so we have
4299 * access to it for debugging, etc.
4301 ret
= t4_init_devlog_params(adap
);
4305 /* Contact FW, advertising Master capability */
4306 ret
= t4_fw_hello(adap
, adap
->mbox
, adap
->mbox
,
4307 is_kdump_kernel() ? MASTER_MUST
: MASTER_MAY
, &state
);
4309 dev_err(adap
->pdev_dev
, "could not connect to FW, error %d\n",
4313 if (ret
== adap
->mbox
)
4314 adap
->flags
|= CXGB4_MASTER_PF
;
4317 * If we're the Master PF Driver and the device is uninitialized,
4318 * then let's consider upgrading the firmware ... (We always want
4319 * to check the firmware version number in order to A. get it for
4320 * later reporting and B. to warn if the currently loaded firmware
4321 * is excessively mismatched relative to the driver.)
4324 t4_get_version_info(adap
);
4325 ret
= t4_check_fw_version(adap
);
4326 /* If firmware is too old (not supported by driver) force an update. */
4328 state
= DEV_STATE_UNINIT
;
4329 if ((adap
->flags
& CXGB4_MASTER_PF
) && state
!= DEV_STATE_INIT
) {
4330 struct fw_info
*fw_info
;
4331 struct fw_hdr
*card_fw
;
4332 const struct firmware
*fw
;
4333 const u8
*fw_data
= NULL
;
4334 unsigned int fw_size
= 0;
4336 /* This is the firmware whose headers the driver was compiled
4339 fw_info
= find_fw_info(CHELSIO_CHIP_VERSION(adap
->params
.chip
));
4340 if (fw_info
== NULL
) {
4341 dev_err(adap
->pdev_dev
,
4342 "unable to get firmware info for chip %d.\n",
4343 CHELSIO_CHIP_VERSION(adap
->params
.chip
));
4347 /* allocate memory to read the header of the firmware on the
4350 card_fw
= kvzalloc(sizeof(*card_fw
), GFP_KERNEL
);
4356 /* Get FW from from /lib/firmware/ */
4357 ret
= request_firmware(&fw
, fw_info
->fw_mod_name
,
4360 dev_err(adap
->pdev_dev
,
4361 "unable to load firmware image %s, error %d\n",
4362 fw_info
->fw_mod_name
, ret
);
4368 /* upgrade FW logic */
4369 ret
= t4_prep_fw(adap
, fw_info
, fw_data
, fw_size
, card_fw
,
4373 release_firmware(fw
);
	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		ret = adap_config_hma(adap);
		if (ret)
			dev_err(adap->pdev_dev,
				"HMA configuration failed with error %d\n",
				ret);
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * give up.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}
	/* Now that we've successfully configured and initialized the adapter
	 * (or found it already initialized), we can ask the Firmware what
	 * resources it has provisioned for us.
	 */
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Unable to retrieve resource provisioning information\n");
		goto bye;
	}

	/* Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 *
	 * We need to do this after initializing the adapter because someone
	 * could have FLASHed a new VPD which won't be read by the firmware
	 * until we do the RESET ...
	 */
	ret = t4_get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/* Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	/* Grab the SGE Doorbell Queue Timer values.  If successful, that
	 * indicates that the Firmware and Hardware support this.
	 */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);

	if (!ret) {
		adap->sge.dbqtimer_tick = val[0];
		ret = t4_read_sge_dbqtimers(adap,
					    ARRAY_SIZE(adap->sge.dbqtimer_val),
					    adap->sge.dbqtimer_val);
	}

	if (!ret)
		adap->flags |= CXGB4_SGE_DBQ_TIMER;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;
	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		/* Read the raw mps entries. In T6, the last 2 tcam entries
		 * are reserved for raw mac addresses (rawf = 2, one per port).
		 */
		params[0] = FW_PARAM_PFVF(RAWF_START);
		params[1] = FW_PARAM_PFVF(RAWF_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret == 0) {
			adap->rawf_start = val[0];
			adap->rawf_cnt = val[1] - val[0] + 1;
		}
	}

	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info. Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps
	 * ie starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl =	kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif

	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];
	/* We don't yet have a PARAMs call to retrieve the number of Traffic
	 * Classes supported by the hardware/firmware so we hard code it here
	 * for now.
	 */
	adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= CXGB4_FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);

	/* See if FW supports FW_FILTER2 work request */
	if (is_t4(adap->params.chip)) {
		adap->params.filter2_wr_support = 0;
	} else {
		params[0] = FW_PARAM_DEV(FILTER2_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
	}

	/* Check if FW supports returning vin and smt index.
	 * If this is not supported, driver will interpret
	 * these values from viid.
	 */
	params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	/* Hash filter needs some mandatory register settings to be tested;
	 * that test requires knowing whether offload is enabled, hence
	 * we check and set it here.
	 */
	if (caps_cmd.ofldcaps)
		adap->params.offload = 1;

	if (caps_cmd.ofldcaps ||
	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region. Divide the available filter
		 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if the
		 * workaround path is enabled.
		 * 1. For regular filters.
		 * 2. Server filter: These are special filters which are used
		 * to redirect SYN packets to offload queue.
		 */
		if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					 DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
						adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
			init_hash_filter(adap);
		} else {
			adap->num_ofld_uld += 1;
		}
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SRQ_START);
		params[1] = FW_PARAM_PFVF(SRQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (!ret) {
			adap->vres.srq.start = val[0];
			adap->vres.srq.size = val[1] - val[0] + 1;
		}
		if (adap->vres.srq.size) {
			adap->srq = t4_init_srq(adap->vres.srq.size);
			if (!adap->srq)
				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
		}

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);

		/* Enable write_with_immediate if FW supports it */
		params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);

		/* Enable write_cmpl if FW supports it */
		params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
		if (is_t6(adap->params.chip)) {
			params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
			params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
					      params, val);
			if (!ret) {
				adap->vres.ppod_edram.start = val[0];
				adap->vres.ppod_edram.size =
					val[1] - val[0] + 1;

				dev_info(adap->pdev_dev,
					 "ppod edram start 0x%x end 0x%x size 0x%x\n",
					 val[0], val[1],
					 adap->vres.ppod_edram.size);
			}
		}
		/* LIO target and cxgb4i initiator */
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.cryptocaps) {
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
			params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0) {
				if (ret != -EINVAL)
					goto bye;
			} else {
				adap->vres.ncrypto_fc = val[0];
			}
			adap->num_ofld_uld += 1;
		}
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_TLS_INLINE) {
			params[0] = FW_PARAM_PFVF(TLS_START);
			params[1] = FW_PARAM_PFVF(TLS_END);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0)
				goto bye;
			adap->vres.key.start = val[0];
			adap->vres.key.size = val[1] - val[0] + 1;
			adap->num_uld += 1;
		}
		adap->params.crypto = ntohs(caps_cmd.cryptocaps);
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	adap->flags |= CXGB4_FW_OK;
	t4_init_tp_params(adap, true);
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	adap_free_hma_mem(adap);
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
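/* Worked example for the 1492 -> 1488 MTU fixup above (illustrative only,
 * assuming 20-byte IPv4 and 20-byte TCP headers):
 *
 *	MTU 1500, no TCP options:      1500 - 40 = 1460 payload (1460 % 8 = 4)
 *	MTU 1488, no TCP options:      1488 - 40 = 1448 payload (1448 = 181*8)
 *	MTU 1500, 12-byte timestamps:  1500 - 52 = 1448 payload (again 181*8)
 *
 * With both 1488 and 1500 in the table, the TCP payload stays a multiple of
 * 8 whichever 4-byte-multiple option combination the peer negotiates.
 */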
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~CXGB4_FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & CXGB4_DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~CXGB4_DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & CXGB4_DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= CXGB4_DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= CXGB4_FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);
		u8 vivld = 0, vin = 0;

		ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
				  NULL, NULL, &vivld, &vin);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		pi->viid = ret;
		pi->xact_addr_filt = -1;
		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */
		if (adap->params.viid_smt_extn_support) {
			pi->vivld = vivld;
			pi->vin = vin;
		} else {
			/* Retrieve the values from VIID */
			pi->vivld = FW_VIID_VIVLD_G(pi->viid);
			pi->vin = FW_VIID_VIN_G(pi->viid);
		}
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}
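/* These three callbacks plug into the kernel's PCI error-recovery core
 * (AER, and EEH on powerpc) via the pci_error_handlers table below:
 * eeh_err_detected() quiesces the device, eeh_slot_reset() re-enables and
 * re-initializes it after the slot/link reset, and eeh_resume() restarts
 * the interfaces that were running.
 */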
static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
/* Return true if the Link Configuration supports "High Speeds" (those greater
 * than 1Gb/s).
 */
static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
	high_speeds = speeds &
			~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

	return high_speeds != 0;
}
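/* Worked example (illustrative): a port whose pcaps advertises 1G|10G|25G
 * leaves 10G|25G set after masking off the 100M and 1G bits, so
 * is_x_10g_port() returns true; a 100M|1G-only port masks down to 0 and
 * returns false.
 */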
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static int cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
	int niqflint, neq, avail_eth_qsets;
	int max_eth_qsets = 32;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif

	/* Reduce memory usage in kdump environment, disable all offload.
	 */
	if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	/* Calculate the number of Ethernet Queue Sets available based on
	 * resources provisioned for us.  We always have an Asynchronous
	 * Firmware Event Ingress Queue.  If we're operating in MSI or Legacy
	 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
	 * Ingress Queue.  Meanwhile, we need two Egress Queues for each
	 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
	 *
	 * Note that we should also take into account all of the various
	 * Offload Queues.  But, in any situation where we're operating in
	 * a Resource Constrained Provisioning environment, doing any Offload
	 * at all is problematic ...
	 */
	niqflint = adap->params.pfres.niqflint - 1;
	if (!(adap->flags & CXGB4_USING_MSIX))
		niqflint--;
	neq = adap->params.pfres.neq / 2;
	avail_eth_qsets = min(niqflint, neq);

	if (avail_eth_qsets > max_eth_qsets)
		avail_eth_qsets = max_eth_qsets;

	if (avail_eth_qsets < adap->params.nports) {
		dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
			avail_eth_qsets, adap->params.nports);
		return -ENOMEM;
	}

	/* Count the number of 10Gb/s or better ports */
	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);

#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > avail_eth_qsets) {
		dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
			avail_eth_qsets, adap->params.nports * 8);
		return -ENOMEM;
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_kdump_kernel() ? 1 : 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	if (is_kdump_kernel())
		q10g = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_uld(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else {
			s->ofldqsets = adap->params.nports;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	if (!is_t4(adap->params.chip))
		s->ptptxq.q.size = 8;

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);

	return 0;
}
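/* Worked example for the non-DCB sizing above (illustrative): with 2 ports,
 * both high-speed (n10g = 2), and avail_eth_qsets = 32:
 *
 *	q10g = (32 - (2 - 2)) / 2 = 16
 *
 * which is then capped by netif_get_num_default_rss_queues() (typically 8),
 * so each port ends up with 8 queue sets and qidx/max_ethqsets = 16.
 */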
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
static int get_msix_info(struct adapter *adap)
{
	struct uld_msix_info *msix_info;
	unsigned int max_ingq = 0;

	if (is_offload(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
	if (is_pci_uld(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_uld;
	if (!max_ingq)
		return 0;

	msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
	if (!msix_info)
		return -ENOMEM;

	adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
						 sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap_ulds.msix_bmap) {
		kfree(msix_info);
		return -ENOMEM;
	}
	spin_lock_init(&adap->msix_bmap_ulds.lock);
	adap->msix_info_ulds = msix_info;
	return 0;
}
static void free_msix_info(struct adapter *adap)
{
	if (!(adap->num_uld && adap->num_ofld_uld))
		return;

	kfree(adap->msix_info_ulds);
	kfree(adap->msix_bmap_ulds.msix_bmap);
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0, uld_need = 0;
	int i, j, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;
	int max_ingq = MAX_INGQ;

	if (is_pci_uld(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
	if (is_offload(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
	entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
				GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* map for msix */
	if (get_msix_info(adap)) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for (i = 0; i < max_ingq + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += adap->num_ofld_uld * s->ofldqsets;
		ofld_need = adap->num_ofld_uld * nchan;
	}
	if (is_pci_uld(adap)) {
		want += adap->num_uld * s->ofldqsets;
		uld_need = adap->num_uld * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need - uld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_uld(adap)) {
		if (allocated < want)
			s->nqs_per_uld = nchan;
		else
			s->nqs_per_uld = s->ofldqsets;
	}

	for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
		adap->msix_info[i].vec = entries[i].vector;
	if (is_uld(adap)) {
		for (j = 0 ; i < allocated; ++i, j++) {
			adap->msix_info_ulds[j].vec = entries[i].vector;
			adap->msix_info_ulds[j].idx = i;
		}
		adap->msix_bmap_ulds.mapsize = j;
	}
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d per uld %d\n",
		 allocated, s->max_ethqsets, s->nqs_per_uld);

	kfree(entries);
	return 0;
}
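/* Worked example for the want/need arithmetic above (illustrative, non-DCB):
 * a 2-port NIC-only adapter (no offload ULDs) with max_ethqsets = 16 asks for
 *
 *	want = 16 + EXTRA_VECS = 18,	need = 2 + EXTRA_VECS = 4
 *
 * pci_enable_msix_range() then returns any count in [need, want]; if it
 * grants fewer than 18, reduce_ethqs() trims the per-port queue sets so the
 * Ethernet queues fit in what remains after the 2 extra vectors.
 */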
static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}
/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
	/* Hardware/Firmware/etc. Version/Revision IDs */
	t4_dump_version_info(adapter);

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
		 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
		  (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
		 is_offload(adapter) ? "Offload" : "non-Offload");
}
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
		bufp += sprintf(bufp, "100M/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
		bufp += sprintf(bufp, "1G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
		bufp += sprintf(bufp, "25G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
		bufp += sprintf(bufp, "50G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
		bufp += sprintf(bufp, "100G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
		bufp += sprintf(bufp, "200G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
		bufp += sprintf(bufp, "400G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	kvfree(adapter->smt);
	kvfree(adapter->l2t);
	kvfree(adapter->srq);
	t4_cleanup_sched(adapter);
	kvfree(adapter->tids.tid_tab);
	cxgb4_cleanup_tc_flower(adapter);
	cxgb4_cleanup_tc_u32(adapter);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & CXGB4_FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int t4_get_chip_type(struct adapter *adap, int ver)
{
	u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));

	switch (ver) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		break;
	}
	return -EINVAL;
}
#ifdef CONFIG_PCI_IOV
static void cxgb4_mgmt_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NONE;
	dev->mtu = 0;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
}

static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

	pcie_fw = readl(adap->regs + PCIE_FW_A);
	/* Check if fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F)) {
		dev_warn(&pdev->dev, "Device not initialized\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VFs is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		return current_vfs;
	}
	/* Note that the upper-level code ensures that we're never called with
	 * a non-zero "num_vfs" when we already have VFs instantiated.  But
	 * it never hurts to code defensively.
	 */
	if (num_vfs != 0 && current_vfs != 0)
		return -EBUSY;

	/* Nothing to do for no change. */
	if (num_vfs == current_vfs)
		return num_vfs;

	/* Disable SRIOV when zero is passed. */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		/* free VF Management Interface */
		unregister_netdev(adap->port[0]);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;

		/* free VF resources */
		adap->num_vfs = 0;
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		return 0;
	}

	if (!current_vfs) {
		struct fw_pfvf_cmd port_cmd, port_rpl;
		struct net_device *netdev;
		unsigned int pmask, port;
		struct pci_dev *pbridge;
		struct port_info *pi;
		char name[IFNAMSIZ];
		u32 devcap2;
		u16 flags;

		/* If we want to instantiate Virtual Functions, then our
		 * parent bridge's PCI-E needs to support Alternative Routing
		 * ID (ARI) because our VFs will show up at function offset 8
		 * and above.
		 */
		pbridge = pdev->bus->self;
		pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
		pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);

		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
			/* Our parent bridge does not support ARI so issue a
			 * warning and skip instantiating the VFs.  They
			 * won't be reachable.
			 */
			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
				 PCI_FUNC(pbridge->devfn));
			return -ENOTSUPP;
		}
		memset(&port_cmd, 0, sizeof(port_cmd));
		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
						 FW_CMD_REQUEST_F |
						 FW_CMD_READ_F |
						 FW_PFVF_CMD_PFN_V(adap->pf) |
						 FW_PFVF_CMD_VFN_V(0));
		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
				 &port_rpl);
		if (err)
			return err;
		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
		port = ffs(pmask) - 1;
		/* Allocate VF Management Interface. */
		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
			 adap->pf);
		netdev = alloc_netdev(sizeof(struct port_info),
				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
		if (!netdev)
			return -ENOMEM;

		pi = netdev_priv(netdev);
		pi->adapter = adap;
		pi->lport = port;
		pi->tx_chan = port;
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adap->port[0] = netdev;
		pi->port_id = 0;

		err = register_netdev(adap->port[0]);
		if (err) {
			pr_info("Unable to register VF mgmt netdev %s\n", name);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return err;
		}
		/* Allocate and set up VF Information. */
		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
				       sizeof(struct vf_info), GFP_KERNEL);
		if (!adap->vfinfo) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return -ENOMEM;
		}
		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
	}
	/* Instantiate the requested number of VFs. */
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		pr_info("Unable to instantiate %d VFs\n", num_vfs);
		if (!current_vfs) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			kfree(adap->vfinfo);
			adap->vfinfo = NULL;
		}
		return err;
	}

	adap->num_vfs = num_vfs;
	return num_vfs;
}
#endif /* CONFIG_PCI_IOV */
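/* Usage note: the PCI core calls cxgb4_iov_configure() (wired up as
 * .sriov_configure in cxgb4_driver below, under CONFIG_PCI_IOV) when
 * userspace writes the PF's "sriov_numvfs" sysfs attribute, e.g.
 * (device address illustrative):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.4/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.4/sriov_numvfs
 *
 * The first instantiates 4 VFs plus the "mgmtpf" management netdev; the
 * second tears both down again.
 */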
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct adapter *adapter;
	static int adap_idx = 1;
	int s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	enum chip_type chip;
	void __iomem *regs;
	int func, chip_ver;
	u16 device_id;
	int i, err;
	u32 whoami;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->regs = regs;
	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_free_adapter;

	/* We control everything through one PF */
	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
	if ((int)chip < 0) {
		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
		err = chip;
		goto out_free_adapter;
	}
	chip_ver = CHELSIO_CHIP_VERSION(chip);
	func = chip_ver <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->params.chip = chip;
	adapter->adap_idx = adap_idx;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
	pci_set_drvdata(pdev, adapter);

	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		return 0;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_free_adapter;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_free_adapter;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adap_idx++;
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= CXGB4_DEV_ENABLED;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers.  (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free Lists Buffers will be
	 * sent without the Relaxed Ordering Attribute thus guaranteeing that
	 * all preceding PCIe Transaction Layer Packets will be processed
	 * first.)  But some Root Complexes have various issues with Upstream
	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
	 * On such platforms the Relaxed Ordering bit is cleared in the
	 * configuration space of the devices below the Root Complex, so we
	 * check our PCIe configuration space to see if it's flagged with
	 * advice against using Relaxed Ordering.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
		adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (is_kdump_kernel()) {
		/* Collect hardware state and append to /proc/vmcore */
		err = cxgb4_cudbg_vmcore_add_dump(adapter);
		if (err) {
			dev_warn(adapter->pdev_dev,
				 "Fail collecting vmcore device dump, err: %d. Continuing\n",
				 err);
			err = 0;
		}
	}

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for
		 * the queue is less than the number of segments that fit in
		 * a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
		pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);

	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;

		if (chip_ver > CHELSIO_T5) {
			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
						   NETIF_F_IPV6_CSUM |
						   NETIF_F_RXCSUM |
						   NETIF_F_GSO_UDP_TUNNEL |
						   NETIF_F_GSO_UDP_TUNNEL_CSUM |
						   NETIF_F_TSO | NETIF_F_TSO6;

			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
					       NETIF_F_HW_TLS_RECORD;
		}

		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;              /* accommodate SACK */
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
		cxgb4_dcb_version_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	cxgb4_init_ethtool_dump(adapter);

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & CXGB4_FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

	if (!(adapter->flags & CXGB4_FW_OK))
		goto fw_attach_fail;

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	err = cfg_queues(adapter);
	if (err)
		goto out_free_dev;

	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (chip_ver <= CHELSIO_T5 &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");
	}

	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip_ver <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= CXGB4_USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= CXGB4_USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	pcie_print_link_status(pdev);

	cxgb4_init_mps_ref_entries(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d", err);
		goto out_free_dev;
	}

fw_attach_fail:
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter)) {
		mutex_lock(&uld_mutex);
		list_add_tail(&adapter->list_node, &adapter_list);
		mutex_unlock(&uld_mutex);
	}

	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	if (IS_REACHABLE(CONFIG_THERMAL) &&
	    !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
		cxgb4_thermal_init(adapter);

	print_adapter_info(adapter);
	return 0;

 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & CXGB4_USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
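/* Worked example for the write-combining check in init_one() above
 * (illustrative): with 4 KB pages, num_seg = 4096 / SEGMENT_SIZE(128) = 32.
 * If the SGE_EGRESS_QUEUES_PER_PAGE_PF field for our PF decodes to qpp = 64,
 * more queues share a page than there are 128-byte doorbell segments, the
 * BAR2 write-combining scheme cannot work, and the probe fails with -EINVAL.
 */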
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	struct hash_mac_addr *entry, *tmp;

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	/* If we allocated filters, free up state associated with any
	 * valid filters ...
	 */
	clear_all_filters(adapter);

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		cxgb4_free_mps_ref_entries(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);
		if (IS_REACHABLE(CONFIG_THERMAL))
			cxgb4_thermal_remove(adapter);

		if (adapter->flags & CXGB4_FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & CXGB4_USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}

#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	pci_disable_pcie_error_reporting(pdev);
	if ((adapter->flags & CXGB4_DEV_ENABLED)) {
		pci_disable_device(pdev);
		adapter->flags &= ~CXGB4_DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
	synchronize_rcu();
	kfree(adapter);
}
6215 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
6216 * delivery. This is essentially a stripped down version of the PCI remove()
6217 * function where we do the minimal amount of work necessary to shutdown any
6220 static void shutdown_one(struct pci_dev
*pdev
)
6222 struct adapter
*adapter
= pci_get_drvdata(pdev
);
6224 /* As with remove_one() above (see extended comment), we only want do
6225 * do cleanup on PCI Devices which went all the way through init_one()
6229 pci_release_regions(pdev
);
6233 adapter
->flags
|= CXGB4_SHUTTING_DOWN
;
6235 if (adapter
->pf
== 4) {
6238 for_each_port(adapter
, i
)
6239 if (adapter
->port
[i
]->reg_state
== NETREG_REGISTERED
)
6240 cxgb_close(adapter
->port
[i
]);
6242 if (is_uld(adapter
)) {
6243 detach_ulds(adapter
);
6244 t4_uld_clean_up(adapter
);
6247 disable_interrupts(adapter
);
6248 disable_msi(adapter
);
6250 t4_sge_stop(adapter
);
6251 if (adapter
->flags
& CXGB4_FW_OK
)
6252 t4_fw_bye(adapter
, adapter
->mbox
);
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		goto err_pci;

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		if (ret)
			pci_unregister_driver(&cxgb4_driver);
		else
			inet6addr_registered = true;
	}
#endif

	if (ret == 0)
		return ret;

err_pci:
	debugfs_remove(cxgb4_debugfs_root);
	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);