/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
8 #include <linux/netdevice.h>
9 #include <linux/if_vlan.h>
11 #include <linux/ipv6.h>
/* Tx command descriptor opcodes, packed into flags_opcode bits 7-12 by
 * qlcnic_set_tx_flags_opcode().
 */
#define TX_ETHER_PKT 0x01	/* plain L2 frame, no checksum offload */
#define TX_TCP_PKT 0x02	/* IPv4/TCP checksum offload */
#define TX_UDP_PKT 0x03	/* IPv4/UDP checksum offload */
#define TX_IP_PKT 0x04	/* IPv4 header checksum only */
#define TX_TCP_LSO 0x05	/* TCP segmentation offload, IPv4 */
#define TX_TCP_LSO6 0x06	/* TCP segmentation offload, IPv6 */
#define TX_TCPV6_PKT 0x0b	/* IPv6/TCP checksum offload */
#define TX_UDPV6_PKT 0x0c	/* IPv6/UDP checksum offload */
/* Tx descriptor flag bits (flags_opcode bits 0-6) */
#define FLAGS_VLAN_TAGGED 0x10	/* frame carries an in-band 802.1Q tag */
#define FLAGS_VLAN_OOB 0x40	/* VLAN tag supplied out-of-band in vlan_TCI */
/* Store the VLAN TCI into a Tx command descriptor (converted to the
 * little-endian wire format).  The expansion is fully parenthesized and
 * carries no trailing semicolon, so the macro is safe inside un-braced
 * if/else bodies and as a sub-expression — the previous form expanded to
 * a statement ending in ';', which breaks such uses.
 */
#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
/* port_ctxid packs the port number in the low nibble and the context id
 * in the high nibble.  These two OR new bits into the existing field.
 */
#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

/* Overwrite port_ctxid, replicating the port number into both nibbles. */
#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
/* Pack flags (bits 0-6) and opcode (bits 7-12) into flags_opcode,
 * little-endian.  NOTE(review): OR-merge rather than assignment —
 * presumably the descriptor field is zeroed before first use.
 */
#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

/* Pack fragment count (bits 0-7) and packet length (bits 8-31),
 * little-endian; length is truncated to 24 bits.
 */
#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
/* owner bits of status_desc (bits 56-57 of the first status word) */
#define STATUS_OWNER_HOST (0x1ULL << 56)	/* descriptor owned by host */
#define STATUS_OWNER_PHANTOM (0x2ULL << 56)	/* descriptor owned by firmware */
/*
 * sts_data bit layout:
 * 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
 * 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
 * 53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
/* Field extractors for the 64-bit Rx status word.  Layout:
 * 0-3 port, 4-7 status, 8-11 type, 12-27 total_length,
 * 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset,
 * 53-55 desc_cnt, 56-57 owner, 58-63 opcode.
 * The qlcnic_get_sts_port() body (bits 0-3) was missing and has been
 * restored from the layout above.
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)
/* Field extractors for the two 64-bit LRO status words.  All arguments
 * are now fully parenthesized — qlcnic_get_lro_sts_mss() previously used
 * a bare `sts_data1`, which mis-evaluates for compound arguments such as
 * `a | b`.
 */
#define qlcnic_get_lro_sts_refhandle(sts_data) \
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data) \
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data) \
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data) \
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data) \
	(((sts_data) >> 52) & 0x1)
/* The following two read the second status word (sts_data1). */
#define qlcnic_get_lro_sts_seq_number(sts_data) \
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1) \
	(((sts_data1) >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD 0x03	/* TCP SYN offload completion */
#define QLCNIC_RXPKT_DESC 0x04	/* normal Rx packet */
#define QLCNIC_OLD_RXPKT_DESC 0x3f	/* legacy Rx packet format */
#define QLCNIC_RESPONSE_DESC 0x05	/* firmware message */
#define QLCNIC_LRO_DESC 0x12	/* LRO-aggregated packet */

/* Tx completion budget per poll invocation */
#define QLCNIC_TX_POLL_BUDGET 128
/* Base TCP header length in bytes (no options) */
#define QLCNIC_TCP_HDR_SIZE 20
/* TCP timestamp option length in bytes */
#define QLCNIC_TCP_TS_OPTION_SIZE 12
/* Ring id is carried in the top bit (63) of a 64-bit reference handle */
#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
/* Ownership value written back to return a descriptor to firmware */
#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
/* for status field in status_desc */
#define STATUS_CKSUM_LOOP 0	/* loopback packet, checksum not validated */
#define STATUS_CKSUM_OK 2	/* hardware validated the checksum */

/* 83xx status-word field extractors.  Every argument is parenthesized —
 * pktln/hndl/csum_status/opcode previously used a bare `sts`, which is
 * unsafe for compound arguments and inconsistent with the other macros
 * in this file.
 */
#define qlcnic_83xx_pktln(sts)		(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		(((sts) >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	(((sts) >> 39) & 7)
#define qlcnic_83xx_opcode(sts)	(((sts) >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)
126 struct sk_buff
*qlcnic_process_rxbuf(struct qlcnic_adapter
*,
127 struct qlcnic_host_rds_ring
*, u16
, u16
);
129 inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter
*adapter
,
130 struct qlcnic_host_tx_ring
*tx_ring
)
132 writel(0, tx_ring
->crb_intr_mask
);
135 inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter
*adapter
,
136 struct qlcnic_host_tx_ring
*tx_ring
)
138 writel(1, tx_ring
->crb_intr_mask
);
141 static inline u8
qlcnic_mac_hash(u64 mac
)
143 return (u8
)((mac
& 0xff) ^ ((mac
>> 40) & 0xff));
146 static inline u32
qlcnic_get_ref_handle(struct qlcnic_adapter
*adapter
,
147 u16 handle
, u8 ring_id
)
149 if (adapter
->pdev
->device
== PCI_DEVICE_ID_QLOGIC_QLE834X
)
150 return handle
| (ring_id
<< 15);
155 static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data
)
157 return (qlcnic_get_sts_status(sts_data
) == STATUS_CKSUM_LOOP
) ? 1 : 0;
160 void qlcnic_add_lb_filter(struct qlcnic_adapter
*adapter
, struct sk_buff
*skb
,
161 int loopback_pkt
, __le16 vlan_id
)
163 struct ethhdr
*phdr
= (struct ethhdr
*)(skb
->data
);
164 struct qlcnic_filter
*fil
, *tmp_fil
;
165 struct hlist_node
*n
;
166 struct hlist_head
*head
;
169 u8 hindex
, found
= 0, op
;
172 memcpy(&src_addr
, phdr
->h_source
, ETH_ALEN
);
175 if (adapter
->rx_fhash
.fnum
>= adapter
->rx_fhash
.fmax
)
178 hindex
= qlcnic_mac_hash(src_addr
) &
179 (adapter
->fhash
.fbucket_size
- 1);
180 head
= &(adapter
->rx_fhash
.fhead
[hindex
]);
182 hlist_for_each_entry_safe(tmp_fil
, n
, head
, fnode
) {
183 if (!memcmp(tmp_fil
->faddr
, &src_addr
, ETH_ALEN
) &&
184 tmp_fil
->vlan_id
== vlan_id
) {
185 time
= tmp_fil
->ftime
;
186 if (jiffies
> (QLCNIC_READD_AGE
* HZ
+ time
))
187 tmp_fil
->ftime
= jiffies
;
192 fil
= kzalloc(sizeof(struct qlcnic_filter
), GFP_ATOMIC
);
196 fil
->ftime
= jiffies
;
197 memcpy(fil
->faddr
, &src_addr
, ETH_ALEN
);
198 fil
->vlan_id
= vlan_id
;
199 spin_lock(&adapter
->rx_mac_learn_lock
);
200 hlist_add_head(&(fil
->fnode
), head
);
201 adapter
->rx_fhash
.fnum
++;
202 spin_unlock(&adapter
->rx_mac_learn_lock
);
204 hindex
= qlcnic_mac_hash(src_addr
) &
205 (adapter
->fhash
.fbucket_size
- 1);
206 head
= &(adapter
->rx_fhash
.fhead
[hindex
]);
207 spin_lock(&adapter
->rx_mac_learn_lock
);
208 hlist_for_each_entry_safe(tmp_fil
, n
, head
, fnode
) {
209 if (!memcmp(tmp_fil
->faddr
, &src_addr
, ETH_ALEN
) &&
210 tmp_fil
->vlan_id
== vlan_id
) {
217 spin_unlock(&adapter
->rx_mac_learn_lock
);
221 op
= vlan_id
? QLCNIC_MAC_VLAN_ADD
: QLCNIC_MAC_ADD
;
222 ret
= qlcnic_sre_macaddr_change(adapter
, (u8
*)&src_addr
,
225 op
= vlan_id
? QLCNIC_MAC_VLAN_DEL
: QLCNIC_MAC_DEL
;
226 ret
= qlcnic_sre_macaddr_change(adapter
,
230 hlist_del(&(tmp_fil
->fnode
));
231 adapter
->rx_fhash
.fnum
--;
234 spin_unlock(&adapter
->rx_mac_learn_lock
);
238 void qlcnic_82xx_change_filter(struct qlcnic_adapter
*adapter
, u64
*uaddr
,
241 struct cmd_desc_type0
*hwdesc
;
242 struct qlcnic_nic_req
*req
;
243 struct qlcnic_mac_req
*mac_req
;
244 struct qlcnic_vlan_req
*vlan_req
;
245 struct qlcnic_host_tx_ring
*tx_ring
= adapter
->tx_ring
;
249 producer
= tx_ring
->producer
;
250 hwdesc
= &tx_ring
->desc_head
[tx_ring
->producer
];
252 req
= (struct qlcnic_nic_req
*)hwdesc
;
253 memset(req
, 0, sizeof(struct qlcnic_nic_req
));
254 req
->qhdr
= cpu_to_le64(QLCNIC_REQUEST
<< 23);
256 word
= QLCNIC_MAC_EVENT
| ((u64
)(adapter
->portnum
) << 16);
257 req
->req_hdr
= cpu_to_le64(word
);
259 mac_req
= (struct qlcnic_mac_req
*)&(req
->words
[0]);
260 mac_req
->op
= vlan_id
? QLCNIC_MAC_VLAN_ADD
: QLCNIC_MAC_ADD
;
261 memcpy(mac_req
->mac_addr
, &uaddr
, ETH_ALEN
);
263 vlan_req
= (struct qlcnic_vlan_req
*)&req
->words
[1];
264 vlan_req
->vlan_id
= vlan_id
;
266 tx_ring
->producer
= get_next_index(producer
, tx_ring
->num_desc
);
270 static void qlcnic_send_filter(struct qlcnic_adapter
*adapter
,
271 struct cmd_desc_type0
*first_desc
,
274 struct qlcnic_filter
*fil
, *tmp_fil
;
275 struct hlist_node
*n
;
276 struct hlist_head
*head
;
277 struct net_device
*netdev
= adapter
->netdev
;
278 struct ethhdr
*phdr
= (struct ethhdr
*)(skb
->data
);
283 if (ether_addr_equal(phdr
->h_source
, adapter
->mac_addr
))
286 if (adapter
->fhash
.fnum
>= adapter
->fhash
.fmax
) {
287 adapter
->stats
.mac_filter_limit_overrun
++;
288 netdev_info(netdev
, "Can not add more than %d mac addresses\n",
289 adapter
->fhash
.fmax
);
293 memcpy(&src_addr
, phdr
->h_source
, ETH_ALEN
);
294 hindex
= qlcnic_mac_hash(src_addr
) & (adapter
->fhash
.fbucket_size
- 1);
295 head
= &(adapter
->fhash
.fhead
[hindex
]);
297 hlist_for_each_entry_safe(tmp_fil
, n
, head
, fnode
) {
298 if (!memcmp(tmp_fil
->faddr
, &src_addr
, ETH_ALEN
) &&
299 tmp_fil
->vlan_id
== vlan_id
) {
300 if (jiffies
> (QLCNIC_READD_AGE
* HZ
+ tmp_fil
->ftime
))
301 qlcnic_change_filter(adapter
, &src_addr
,
303 tmp_fil
->ftime
= jiffies
;
308 fil
= kzalloc(sizeof(struct qlcnic_filter
), GFP_ATOMIC
);
312 qlcnic_change_filter(adapter
, &src_addr
, vlan_id
);
313 fil
->ftime
= jiffies
;
314 fil
->vlan_id
= vlan_id
;
315 memcpy(fil
->faddr
, &src_addr
, ETH_ALEN
);
316 spin_lock(&adapter
->mac_learn_lock
);
317 hlist_add_head(&(fil
->fnode
), head
);
318 adapter
->fhash
.fnum
++;
319 spin_unlock(&adapter
->mac_learn_lock
);
322 static int qlcnic_tx_pkt(struct qlcnic_adapter
*adapter
,
323 struct cmd_desc_type0
*first_desc
, struct sk_buff
*skb
)
325 u8 l4proto
, opcode
= 0, hdr_len
= 0;
326 u16 flags
= 0, vlan_tci
= 0;
327 int copied
, offset
, copy_len
, size
;
328 struct cmd_desc_type0
*hwdesc
;
329 struct vlan_ethhdr
*vh
;
330 struct qlcnic_host_tx_ring
*tx_ring
= adapter
->tx_ring
;
331 u16 protocol
= ntohs(skb
->protocol
);
332 u32 producer
= tx_ring
->producer
;
334 if (protocol
== ETH_P_8021Q
) {
335 vh
= (struct vlan_ethhdr
*)skb
->data
;
336 flags
= FLAGS_VLAN_TAGGED
;
337 vlan_tci
= ntohs(vh
->h_vlan_TCI
);
338 protocol
= ntohs(vh
->h_vlan_encapsulated_proto
);
339 } else if (vlan_tx_tag_present(skb
)) {
340 flags
= FLAGS_VLAN_OOB
;
341 vlan_tci
= vlan_tx_tag_get(skb
);
343 if (unlikely(adapter
->pvid
)) {
344 if (vlan_tci
&& !(adapter
->flags
& QLCNIC_TAGGING_ENABLED
))
346 if (vlan_tci
&& (adapter
->flags
& QLCNIC_TAGGING_ENABLED
))
349 flags
= FLAGS_VLAN_OOB
;
350 vlan_tci
= adapter
->pvid
;
353 qlcnic_set_tx_vlan_tci(first_desc
, vlan_tci
);
354 qlcnic_set_tx_flags_opcode(first_desc
, flags
, opcode
);
356 if (*(skb
->data
) & BIT_0
) {
358 memcpy(&first_desc
->eth_addr
, skb
->data
, ETH_ALEN
);
360 opcode
= TX_ETHER_PKT
;
361 if ((adapter
->netdev
->features
& (NETIF_F_TSO
| NETIF_F_TSO6
)) &&
362 skb_shinfo(skb
)->gso_size
> 0) {
363 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
364 first_desc
->mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
365 first_desc
->total_hdr_length
= hdr_len
;
366 opcode
= (protocol
== ETH_P_IPV6
) ? TX_TCP_LSO6
: TX_TCP_LSO
;
368 /* For LSO, we need to copy the MAC/IP/TCP headers into
369 * the descriptor ring */
373 if (flags
& FLAGS_VLAN_OOB
) {
374 first_desc
->total_hdr_length
+= VLAN_HLEN
;
375 first_desc
->tcp_hdr_offset
= VLAN_HLEN
;
376 first_desc
->ip_hdr_offset
= VLAN_HLEN
;
378 /* Only in case of TSO on vlan device */
379 flags
|= FLAGS_VLAN_TAGGED
;
381 /* Create a TSO vlan header template for firmware */
382 hwdesc
= &tx_ring
->desc_head
[producer
];
383 tx_ring
->cmd_buf_arr
[producer
].skb
= NULL
;
385 copy_len
= min((int)sizeof(struct cmd_desc_type0
) -
386 offset
, hdr_len
+ VLAN_HLEN
);
388 vh
= (struct vlan_ethhdr
*)((char *) hwdesc
+ 2);
389 skb_copy_from_linear_data(skb
, vh
, 12);
390 vh
->h_vlan_proto
= htons(ETH_P_8021Q
);
391 vh
->h_vlan_TCI
= htons(vlan_tci
);
393 skb_copy_from_linear_data_offset(skb
, 12,
396 copied
= copy_len
- VLAN_HLEN
;
398 producer
= get_next_index(producer
, tx_ring
->num_desc
);
401 while (copied
< hdr_len
) {
402 size
= (int)sizeof(struct cmd_desc_type0
) - offset
;
403 copy_len
= min(size
, (hdr_len
- copied
));
404 hwdesc
= &tx_ring
->desc_head
[producer
];
405 tx_ring
->cmd_buf_arr
[producer
].skb
= NULL
;
406 skb_copy_from_linear_data_offset(skb
, copied
,
411 producer
= get_next_index(producer
, tx_ring
->num_desc
);
414 tx_ring
->producer
= producer
;
416 adapter
->stats
.lso_frames
++;
418 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
419 if (protocol
== ETH_P_IP
) {
420 l4proto
= ip_hdr(skb
)->protocol
;
422 if (l4proto
== IPPROTO_TCP
)
424 else if (l4proto
== IPPROTO_UDP
)
426 } else if (protocol
== ETH_P_IPV6
) {
427 l4proto
= ipv6_hdr(skb
)->nexthdr
;
429 if (l4proto
== IPPROTO_TCP
)
430 opcode
= TX_TCPV6_PKT
;
431 else if (l4proto
== IPPROTO_UDP
)
432 opcode
= TX_UDPV6_PKT
;
435 first_desc
->tcp_hdr_offset
+= skb_transport_offset(skb
);
436 first_desc
->ip_hdr_offset
+= skb_network_offset(skb
);
437 qlcnic_set_tx_flags_opcode(first_desc
, flags
, opcode
);
442 static int qlcnic_map_tx_skb(struct pci_dev
*pdev
, struct sk_buff
*skb
,
443 struct qlcnic_cmd_buffer
*pbuf
)
445 struct qlcnic_skb_frag
*nf
;
446 struct skb_frag_struct
*frag
;
450 nr_frags
= skb_shinfo(skb
)->nr_frags
;
451 nf
= &pbuf
->frag_array
[0];
453 map
= pci_map_single(pdev
, skb
->data
, skb_headlen(skb
),
455 if (pci_dma_mapping_error(pdev
, map
))
459 nf
->length
= skb_headlen(skb
);
461 for (i
= 0; i
< nr_frags
; i
++) {
462 frag
= &skb_shinfo(skb
)->frags
[i
];
463 nf
= &pbuf
->frag_array
[i
+1];
464 map
= skb_frag_dma_map(&pdev
->dev
, frag
, 0, skb_frag_size(frag
),
466 if (dma_mapping_error(&pdev
->dev
, map
))
470 nf
->length
= skb_frag_size(frag
);
477 nf
= &pbuf
->frag_array
[i
+1];
478 pci_unmap_page(pdev
, nf
->dma
, nf
->length
, PCI_DMA_TODEVICE
);
481 nf
= &pbuf
->frag_array
[0];
482 pci_unmap_single(pdev
, nf
->dma
, skb_headlen(skb
), PCI_DMA_TODEVICE
);
488 static void qlcnic_unmap_buffers(struct pci_dev
*pdev
, struct sk_buff
*skb
,
489 struct qlcnic_cmd_buffer
*pbuf
)
491 struct qlcnic_skb_frag
*nf
= &pbuf
->frag_array
[0];
492 int i
, nr_frags
= skb_shinfo(skb
)->nr_frags
;
494 for (i
= 0; i
< nr_frags
; i
++) {
495 nf
= &pbuf
->frag_array
[i
+1];
496 pci_unmap_page(pdev
, nf
->dma
, nf
->length
, PCI_DMA_TODEVICE
);
499 nf
= &pbuf
->frag_array
[0];
500 pci_unmap_single(pdev
, nf
->dma
, skb_headlen(skb
), PCI_DMA_TODEVICE
);
504 static inline void qlcnic_clear_cmddesc(u64
*desc
)
511 netdev_tx_t
qlcnic_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
513 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
514 struct qlcnic_host_tx_ring
*tx_ring
= adapter
->tx_ring
;
515 struct qlcnic_cmd_buffer
*pbuf
;
516 struct qlcnic_skb_frag
*buffrag
;
517 struct cmd_desc_type0
*hwdesc
, *first_desc
;
518 struct pci_dev
*pdev
;
520 int i
, k
, frag_count
, delta
= 0;
521 u32 producer
, num_txd
;
523 num_txd
= tx_ring
->num_desc
;
525 if (!test_bit(__QLCNIC_DEV_UP
, &adapter
->state
)) {
526 netif_stop_queue(netdev
);
527 return NETDEV_TX_BUSY
;
530 if (adapter
->flags
& QLCNIC_MACSPOOF
) {
531 phdr
= (struct ethhdr
*)skb
->data
;
532 if (!ether_addr_equal(phdr
->h_source
, adapter
->mac_addr
))
536 frag_count
= skb_shinfo(skb
)->nr_frags
+ 1;
537 /* 14 frags supported for normal packet and
538 * 32 frags supported for TSO packet
540 if (!skb_is_gso(skb
) && frag_count
> QLCNIC_MAX_FRAGS_PER_TX
) {
541 for (i
= 0; i
< (frag_count
- QLCNIC_MAX_FRAGS_PER_TX
); i
++)
542 delta
+= skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
544 if (!__pskb_pull_tail(skb
, delta
))
547 frag_count
= 1 + skb_shinfo(skb
)->nr_frags
;
550 if (unlikely(qlcnic_tx_avail(tx_ring
) <= TX_STOP_THRESH
)) {
551 netif_stop_queue(netdev
);
552 if (qlcnic_tx_avail(tx_ring
) > TX_STOP_THRESH
) {
553 netif_start_queue(netdev
);
555 adapter
->stats
.xmit_off
++;
556 return NETDEV_TX_BUSY
;
560 producer
= tx_ring
->producer
;
561 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
562 pdev
= adapter
->pdev
;
563 first_desc
= &tx_ring
->desc_head
[producer
];
564 hwdesc
= &tx_ring
->desc_head
[producer
];
565 qlcnic_clear_cmddesc((u64
*)hwdesc
);
567 if (qlcnic_map_tx_skb(pdev
, skb
, pbuf
)) {
568 adapter
->stats
.tx_dma_map_error
++;
573 pbuf
->frag_count
= frag_count
;
575 qlcnic_set_tx_frags_len(first_desc
, frag_count
, skb
->len
);
576 qlcnic_set_tx_port(first_desc
, adapter
->portnum
);
578 for (i
= 0; i
< frag_count
; i
++) {
581 if ((k
== 0) && (i
> 0)) {
582 /* move to next desc.*/
583 producer
= get_next_index(producer
, num_txd
);
584 hwdesc
= &tx_ring
->desc_head
[producer
];
585 qlcnic_clear_cmddesc((u64
*)hwdesc
);
586 tx_ring
->cmd_buf_arr
[producer
].skb
= NULL
;
589 buffrag
= &pbuf
->frag_array
[i
];
590 hwdesc
->buffer_length
[k
] = cpu_to_le16(buffrag
->length
);
593 hwdesc
->addr_buffer1
= cpu_to_le64(buffrag
->dma
);
596 hwdesc
->addr_buffer2
= cpu_to_le64(buffrag
->dma
);
599 hwdesc
->addr_buffer3
= cpu_to_le64(buffrag
->dma
);
602 hwdesc
->addr_buffer4
= cpu_to_le64(buffrag
->dma
);
607 tx_ring
->producer
= get_next_index(producer
, num_txd
);
610 if (unlikely(qlcnic_tx_pkt(adapter
, first_desc
, skb
)))
613 if (adapter
->drv_mac_learn
)
614 qlcnic_send_filter(adapter
, first_desc
, skb
);
616 adapter
->stats
.txbytes
+= skb
->len
;
617 adapter
->stats
.xmitcalled
++;
619 qlcnic_update_cmd_producer(tx_ring
);
624 qlcnic_unmap_buffers(pdev
, skb
, pbuf
);
626 adapter
->stats
.txdropped
++;
627 dev_kfree_skb_any(skb
);
631 void qlcnic_advert_link_change(struct qlcnic_adapter
*adapter
, int linkup
)
633 struct net_device
*netdev
= adapter
->netdev
;
635 if (adapter
->ahw
->linkup
&& !linkup
) {
636 netdev_info(netdev
, "NIC Link is down\n");
637 adapter
->ahw
->linkup
= 0;
638 if (netif_running(netdev
)) {
639 netif_carrier_off(netdev
);
640 netif_stop_queue(netdev
);
642 } else if (!adapter
->ahw
->linkup
&& linkup
) {
643 netdev_info(netdev
, "NIC Link is up\n");
644 adapter
->ahw
->linkup
= 1;
645 if (netif_running(netdev
)) {
646 netif_carrier_on(netdev
);
647 netif_wake_queue(netdev
);
652 static int qlcnic_alloc_rx_skb(struct qlcnic_adapter
*adapter
,
653 struct qlcnic_host_rds_ring
*rds_ring
,
654 struct qlcnic_rx_buffer
*buffer
)
658 struct pci_dev
*pdev
= adapter
->pdev
;
660 skb
= netdev_alloc_skb(adapter
->netdev
, rds_ring
->skb_size
);
662 adapter
->stats
.skb_alloc_failure
++;
666 skb_reserve(skb
, NET_IP_ALIGN
);
667 dma
= pci_map_single(pdev
, skb
->data
,
668 rds_ring
->dma_size
, PCI_DMA_FROMDEVICE
);
670 if (pci_dma_mapping_error(pdev
, dma
)) {
671 adapter
->stats
.rx_dma_map_error
++;
672 dev_kfree_skb_any(skb
);
682 static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter
*adapter
,
683 struct qlcnic_host_rds_ring
*rds_ring
,
686 struct rcv_desc
*pdesc
;
687 struct qlcnic_rx_buffer
*buffer
;
689 uint32_t producer
, handle
;
690 struct list_head
*head
;
692 if (!spin_trylock(&rds_ring
->lock
))
695 producer
= rds_ring
->producer
;
696 head
= &rds_ring
->free_list
;
697 while (!list_empty(head
)) {
698 buffer
= list_entry(head
->next
, struct qlcnic_rx_buffer
, list
);
701 if (qlcnic_alloc_rx_skb(adapter
, rds_ring
, buffer
))
705 list_del(&buffer
->list
);
707 /* make a rcv descriptor */
708 pdesc
= &rds_ring
->desc_head
[producer
];
709 handle
= qlcnic_get_ref_handle(adapter
,
710 buffer
->ref_handle
, ring_id
);
711 pdesc
->reference_handle
= cpu_to_le16(handle
);
712 pdesc
->buffer_length
= cpu_to_le32(rds_ring
->dma_size
);
713 pdesc
->addr_buffer
= cpu_to_le64(buffer
->dma
);
714 producer
= get_next_index(producer
, rds_ring
->num_desc
);
717 rds_ring
->producer
= producer
;
718 writel((producer
- 1) & (rds_ring
->num_desc
- 1),
719 rds_ring
->crb_rcv_producer
);
721 spin_unlock(&rds_ring
->lock
);
724 static int qlcnic_process_cmd_ring(struct qlcnic_adapter
*adapter
,
725 struct qlcnic_host_tx_ring
*tx_ring
,
728 u32 sw_consumer
, hw_consumer
;
729 int i
, done
, count
= 0;
730 struct qlcnic_cmd_buffer
*buffer
;
731 struct pci_dev
*pdev
= adapter
->pdev
;
732 struct net_device
*netdev
= adapter
->netdev
;
733 struct qlcnic_skb_frag
*frag
;
735 if (!spin_trylock(&adapter
->tx_clean_lock
))
738 sw_consumer
= tx_ring
->sw_consumer
;
739 hw_consumer
= le32_to_cpu(*(tx_ring
->hw_consumer
));
741 while (sw_consumer
!= hw_consumer
) {
742 buffer
= &tx_ring
->cmd_buf_arr
[sw_consumer
];
744 frag
= &buffer
->frag_array
[0];
745 pci_unmap_single(pdev
, frag
->dma
, frag
->length
,
748 for (i
= 1; i
< buffer
->frag_count
; i
++) {
750 pci_unmap_page(pdev
, frag
->dma
, frag
->length
,
754 adapter
->stats
.xmitfinished
++;
755 dev_kfree_skb_any(buffer
->skb
);
759 sw_consumer
= get_next_index(sw_consumer
, tx_ring
->num_desc
);
760 if (++count
>= budget
)
764 if (count
&& netif_running(netdev
)) {
765 tx_ring
->sw_consumer
= sw_consumer
;
767 if (netif_queue_stopped(netdev
) && netif_carrier_ok(netdev
)) {
768 if (qlcnic_tx_avail(tx_ring
) > TX_STOP_THRESH
) {
769 netif_wake_queue(netdev
);
770 adapter
->stats
.xmit_on
++;
773 adapter
->tx_timeo_cnt
= 0;
776 * If everything is freed up to consumer then check if the ring is full
777 * If the ring is full then check if more needs to be freed and
778 * schedule the call back again.
780 * This happens when there are 2 CPUs. One could be freeing and the
781 * other filling it. If the ring is full when we get out of here and
782 * the card has already interrupted the host then the host can miss the
785 * There is still a possible race condition and the host could miss an
786 * interrupt. The card has to take care of this.
788 hw_consumer
= le32_to_cpu(*(tx_ring
->hw_consumer
));
789 done
= (sw_consumer
== hw_consumer
);
790 spin_unlock(&adapter
->tx_clean_lock
);
795 static int qlcnic_poll(struct napi_struct
*napi
, int budget
)
797 int tx_complete
, work_done
;
798 struct qlcnic_host_sds_ring
*sds_ring
;
799 struct qlcnic_adapter
*adapter
;
801 sds_ring
= container_of(napi
, struct qlcnic_host_sds_ring
, napi
);
802 adapter
= sds_ring
->adapter
;
803 tx_complete
= qlcnic_process_cmd_ring(adapter
, adapter
->tx_ring
,
805 work_done
= qlcnic_process_rcv_ring(sds_ring
, budget
);
806 if ((work_done
< budget
) && tx_complete
) {
807 napi_complete(&sds_ring
->napi
);
808 if (test_bit(__QLCNIC_DEV_UP
, &adapter
->state
))
809 qlcnic_enable_int(sds_ring
);
815 static int qlcnic_rx_poll(struct napi_struct
*napi
, int budget
)
817 struct qlcnic_host_sds_ring
*sds_ring
;
818 struct qlcnic_adapter
*adapter
;
821 sds_ring
= container_of(napi
, struct qlcnic_host_sds_ring
, napi
);
822 adapter
= sds_ring
->adapter
;
824 work_done
= qlcnic_process_rcv_ring(sds_ring
, budget
);
826 if (work_done
< budget
) {
827 napi_complete(&sds_ring
->napi
);
828 if (test_bit(__QLCNIC_DEV_UP
, &adapter
->state
))
829 qlcnic_enable_int(sds_ring
);
835 static void qlcnic_handle_linkevent(struct qlcnic_adapter
*adapter
,
836 struct qlcnic_fw_msg
*msg
)
839 u16 cable_len
, link_speed
;
840 u8 link_status
, module
, duplex
, autoneg
, lb_status
= 0;
841 struct net_device
*netdev
= adapter
->netdev
;
843 adapter
->ahw
->has_link_events
= 1;
845 cable_OUI
= msg
->body
[1] & 0xffffffff;
846 cable_len
= (msg
->body
[1] >> 32) & 0xffff;
847 link_speed
= (msg
->body
[1] >> 48) & 0xffff;
849 link_status
= msg
->body
[2] & 0xff;
850 duplex
= (msg
->body
[2] >> 16) & 0xff;
851 autoneg
= (msg
->body
[2] >> 24) & 0xff;
852 lb_status
= (msg
->body
[2] >> 32) & 0x3;
854 module
= (msg
->body
[2] >> 8) & 0xff;
855 if (module
== LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE
)
856 dev_info(&netdev
->dev
,
857 "unsupported cable: OUI 0x%x, length %d\n",
858 cable_OUI
, cable_len
);
859 else if (module
== LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN
)
860 dev_info(&netdev
->dev
, "unsupported cable length %d\n",
863 if (!link_status
&& (lb_status
== QLCNIC_ILB_MODE
||
864 lb_status
== QLCNIC_ELB_MODE
))
865 adapter
->ahw
->loopback_state
|= QLCNIC_LINKEVENT
;
867 qlcnic_advert_link_change(adapter
, link_status
);
869 if (duplex
== LINKEVENT_FULL_DUPLEX
)
870 adapter
->ahw
->link_duplex
= DUPLEX_FULL
;
872 adapter
->ahw
->link_duplex
= DUPLEX_HALF
;
874 adapter
->ahw
->module_type
= module
;
875 adapter
->ahw
->link_autoneg
= autoneg
;
878 adapter
->ahw
->link_speed
= link_speed
;
880 adapter
->ahw
->link_speed
= SPEED_UNKNOWN
;
881 adapter
->ahw
->link_duplex
= DUPLEX_UNKNOWN
;
885 static void qlcnic_handle_fw_message(int desc_cnt
, int index
,
886 struct qlcnic_host_sds_ring
*sds_ring
)
888 struct qlcnic_fw_msg msg
;
889 struct status_desc
*desc
;
890 struct qlcnic_adapter
*adapter
;
892 int i
= 0, opcode
, ret
;
894 while (desc_cnt
> 0 && i
< 8) {
895 desc
= &sds_ring
->desc_head
[index
];
896 msg
.words
[i
++] = le64_to_cpu(desc
->status_desc_data
[0]);
897 msg
.words
[i
++] = le64_to_cpu(desc
->status_desc_data
[1]);
899 index
= get_next_index(index
, sds_ring
->num_desc
);
903 adapter
= sds_ring
->adapter
;
904 dev
= &adapter
->pdev
->dev
;
905 opcode
= qlcnic_get_nic_msg_opcode(msg
.body
[0]);
908 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE
:
909 qlcnic_handle_linkevent(adapter
, &msg
);
911 case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK
:
912 ret
= (u32
)(msg
.body
[1]);
915 adapter
->ahw
->loopback_state
|= QLCNIC_LB_RESPONSE
;
918 dev_info(dev
, "loopback already in progress\n");
919 adapter
->ahw
->diag_cnt
= -QLCNIC_TEST_IN_PROGRESS
;
922 dev_info(dev
, "loopback cable is not connected\n");
923 adapter
->ahw
->diag_cnt
= -QLCNIC_LB_CABLE_NOT_CONN
;
927 "loopback configure request failed, err %x\n",
929 adapter
->ahw
->diag_cnt
= -QLCNIC_UNDEFINED_ERROR
;
938 struct sk_buff
*qlcnic_process_rxbuf(struct qlcnic_adapter
*adapter
,
939 struct qlcnic_host_rds_ring
*ring
,
940 u16 index
, u16 cksum
)
942 struct qlcnic_rx_buffer
*buffer
;
945 buffer
= &ring
->rx_buf_arr
[index
];
946 if (unlikely(buffer
->skb
== NULL
)) {
951 pci_unmap_single(adapter
->pdev
, buffer
->dma
, ring
->dma_size
,
955 if (likely((adapter
->netdev
->features
& NETIF_F_RXCSUM
) &&
956 (cksum
== STATUS_CKSUM_OK
|| cksum
== STATUS_CKSUM_LOOP
))) {
957 adapter
->stats
.csummed
++;
958 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
960 skb_checksum_none_assert(skb
);
969 static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter
*adapter
,
970 struct sk_buff
*skb
, u16
*vlan_tag
)
972 struct ethhdr
*eth_hdr
;
974 if (!__vlan_get_tag(skb
, vlan_tag
)) {
975 eth_hdr
= (struct ethhdr
*)skb
->data
;
976 memmove(skb
->data
+ VLAN_HLEN
, eth_hdr
, ETH_ALEN
* 2);
977 skb_pull(skb
, VLAN_HLEN
);
982 if (*vlan_tag
== adapter
->pvid
) {
983 /* Outer vlan tag. Packet should follow non-vlan path */
987 if (adapter
->flags
& QLCNIC_TAGGING_ENABLED
)
993 static struct qlcnic_rx_buffer
*
994 qlcnic_process_rcv(struct qlcnic_adapter
*adapter
,
995 struct qlcnic_host_sds_ring
*sds_ring
, int ring
,
998 struct net_device
*netdev
= adapter
->netdev
;
999 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1000 struct qlcnic_rx_buffer
*buffer
;
1001 struct sk_buff
*skb
;
1002 struct qlcnic_host_rds_ring
*rds_ring
;
1003 int index
, length
, cksum
, pkt_offset
, is_lb_pkt
;
1004 u16 vid
= 0xffff, t_vid
;
1006 if (unlikely(ring
>= adapter
->max_rds_rings
))
1009 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1011 index
= qlcnic_get_sts_refhandle(sts_data0
);
1012 if (unlikely(index
>= rds_ring
->num_desc
))
1015 buffer
= &rds_ring
->rx_buf_arr
[index
];
1016 length
= qlcnic_get_sts_totallength(sts_data0
);
1017 cksum
= qlcnic_get_sts_status(sts_data0
);
1018 pkt_offset
= qlcnic_get_sts_pkt_offset(sts_data0
);
1020 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, cksum
);
1024 if (adapter
->drv_mac_learn
&&
1025 (adapter
->flags
& QLCNIC_ESWITCH_ENABLED
)) {
1027 is_lb_pkt
= qlcnic_82xx_is_lb_pkt(sts_data0
);
1028 qlcnic_add_lb_filter(adapter
, skb
, is_lb_pkt
,
1029 cpu_to_le16(t_vid
));
1032 if (length
> rds_ring
->skb_size
)
1033 skb_put(skb
, rds_ring
->skb_size
);
1035 skb_put(skb
, length
);
1038 skb_pull(skb
, pkt_offset
);
1040 if (unlikely(qlcnic_check_rx_tagging(adapter
, skb
, &vid
))) {
1041 adapter
->stats
.rxdropped
++;
1046 skb
->protocol
= eth_type_trans(skb
, netdev
);
1049 __vlan_hwaccel_put_tag(skb
, vid
);
1051 napi_gro_receive(&sds_ring
->napi
, skb
);
1053 adapter
->stats
.rx_pkts
++;
1054 adapter
->stats
.rxbytes
+= length
;
1059 #define QLC_TCP_HDR_SIZE 20
1060 #define QLC_TCP_TS_OPTION_SIZE 12
1061 #define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1063 static struct qlcnic_rx_buffer
*
1064 qlcnic_process_lro(struct qlcnic_adapter
*adapter
,
1065 int ring
, u64 sts_data0
, u64 sts_data1
)
1067 struct net_device
*netdev
= adapter
->netdev
;
1068 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1069 struct qlcnic_rx_buffer
*buffer
;
1070 struct sk_buff
*skb
;
1071 struct qlcnic_host_rds_ring
*rds_ring
;
1073 struct ipv6hdr
*ipv6h
;
1075 bool push
, timestamp
;
1076 int index
, l2_hdr_offset
, l4_hdr_offset
, is_lb_pkt
;
1077 u16 lro_length
, length
, data_offset
, t_vid
, vid
= 0xffff;
1080 if (unlikely(ring
> adapter
->max_rds_rings
))
1083 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1085 index
= qlcnic_get_lro_sts_refhandle(sts_data0
);
1086 if (unlikely(index
> rds_ring
->num_desc
))
1089 buffer
= &rds_ring
->rx_buf_arr
[index
];
1091 timestamp
= qlcnic_get_lro_sts_timestamp(sts_data0
);
1092 lro_length
= qlcnic_get_lro_sts_length(sts_data0
);
1093 l2_hdr_offset
= qlcnic_get_lro_sts_l2_hdr_offset(sts_data0
);
1094 l4_hdr_offset
= qlcnic_get_lro_sts_l4_hdr_offset(sts_data0
);
1095 push
= qlcnic_get_lro_sts_push_flag(sts_data0
);
1096 seq_number
= qlcnic_get_lro_sts_seq_number(sts_data1
);
1098 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, STATUS_CKSUM_OK
);
1102 if (adapter
->drv_mac_learn
&&
1103 (adapter
->flags
& QLCNIC_ESWITCH_ENABLED
)) {
1105 is_lb_pkt
= qlcnic_82xx_is_lb_pkt(sts_data0
);
1106 qlcnic_add_lb_filter(adapter
, skb
, is_lb_pkt
,
1107 cpu_to_le16(t_vid
));
1111 data_offset
= l4_hdr_offset
+ QLC_TCP_TS_HDR_SIZE
;
1113 data_offset
= l4_hdr_offset
+ QLC_TCP_HDR_SIZE
;
1115 skb_put(skb
, lro_length
+ data_offset
);
1116 skb_pull(skb
, l2_hdr_offset
);
1118 if (unlikely(qlcnic_check_rx_tagging(adapter
, skb
, &vid
))) {
1119 adapter
->stats
.rxdropped
++;
1124 skb
->protocol
= eth_type_trans(skb
, netdev
);
1126 if (ntohs(skb
->protocol
) == ETH_P_IPV6
) {
1127 ipv6h
= (struct ipv6hdr
*)skb
->data
;
1128 th
= (struct tcphdr
*)(skb
->data
+ sizeof(struct ipv6hdr
));
1129 length
= (th
->doff
<< 2) + lro_length
;
1130 ipv6h
->payload_len
= htons(length
);
1132 iph
= (struct iphdr
*)skb
->data
;
1133 th
= (struct tcphdr
*)(skb
->data
+ (iph
->ihl
<< 2));
1134 length
= (iph
->ihl
<< 2) + (th
->doff
<< 2) + lro_length
;
1135 iph
->tot_len
= htons(length
);
1137 iph
->check
= ip_fast_csum((unsigned char *)iph
, iph
->ihl
);
1141 th
->seq
= htonl(seq_number
);
1144 if (adapter
->flags
& QLCNIC_FW_LRO_MSS_CAP
) {
1145 skb_shinfo(skb
)->gso_size
= qlcnic_get_lro_sts_mss(sts_data1
);
1146 if (skb
->protocol
== htons(ETH_P_IPV6
))
1147 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
1149 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
1153 __vlan_hwaccel_put_tag(skb
, vid
);
1154 netif_receive_skb(skb
);
1156 adapter
->stats
.lro_pkts
++;
1157 adapter
->stats
.lrobytes
+= length
;
1162 int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring
*sds_ring
, int max
)
1164 struct qlcnic_host_rds_ring
*rds_ring
;
1165 struct qlcnic_adapter
*adapter
= sds_ring
->adapter
;
1166 struct list_head
*cur
;
1167 struct status_desc
*desc
;
1168 struct qlcnic_rx_buffer
*rxbuf
;
1169 int opcode
, desc_cnt
, count
= 0;
1170 u64 sts_data0
, sts_data1
;
1172 u32 consumer
= sds_ring
->consumer
;
1174 while (count
< max
) {
1175 desc
= &sds_ring
->desc_head
[consumer
];
1176 sts_data0
= le64_to_cpu(desc
->status_desc_data
[0]);
1178 if (!(sts_data0
& STATUS_OWNER_HOST
))
1181 desc_cnt
= qlcnic_get_sts_desc_cnt(sts_data0
);
1182 opcode
= qlcnic_get_sts_opcode(sts_data0
);
1184 case QLCNIC_RXPKT_DESC
:
1185 case QLCNIC_OLD_RXPKT_DESC
:
1186 case QLCNIC_SYN_OFFLOAD
:
1187 ring
= qlcnic_get_sts_type(sts_data0
);
1188 rxbuf
= qlcnic_process_rcv(adapter
, sds_ring
, ring
,
1191 case QLCNIC_LRO_DESC
:
1192 ring
= qlcnic_get_lro_sts_type(sts_data0
);
1193 sts_data1
= le64_to_cpu(desc
->status_desc_data
[1]);
1194 rxbuf
= qlcnic_process_lro(adapter
, ring
, sts_data0
,
1197 case QLCNIC_RESPONSE_DESC
:
1198 qlcnic_handle_fw_message(desc_cnt
, consumer
, sds_ring
);
1202 WARN_ON(desc_cnt
> 1);
1205 list_add_tail(&rxbuf
->list
, &sds_ring
->free_list
[ring
]);
1207 adapter
->stats
.null_rxbuf
++;
1209 for (; desc_cnt
> 0; desc_cnt
--) {
1210 desc
= &sds_ring
->desc_head
[consumer
];
1211 desc
->status_desc_data
[0] = QLCNIC_DESC_OWNER_FW
;
1212 consumer
= get_next_index(consumer
, sds_ring
->num_desc
);
1217 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
1218 rds_ring
= &adapter
->recv_ctx
->rds_rings
[ring
];
1219 if (!list_empty(&sds_ring
->free_list
[ring
])) {
1220 list_for_each(cur
, &sds_ring
->free_list
[ring
]) {
1221 rxbuf
= list_entry(cur
, struct qlcnic_rx_buffer
,
1223 qlcnic_alloc_rx_skb(adapter
, rds_ring
, rxbuf
);
1225 spin_lock(&rds_ring
->lock
);
1226 list_splice_tail_init(&sds_ring
->free_list
[ring
],
1227 &rds_ring
->free_list
);
1228 spin_unlock(&rds_ring
->lock
);
1231 qlcnic_post_rx_buffers_nodb(adapter
, rds_ring
, ring
);
1235 sds_ring
->consumer
= consumer
;
1236 writel(consumer
, sds_ring
->crb_sts_consumer
);
1242 void qlcnic_post_rx_buffers(struct qlcnic_adapter
*adapter
,
1243 struct qlcnic_host_rds_ring
*rds_ring
, u8 ring_id
)
1245 struct rcv_desc
*pdesc
;
1246 struct qlcnic_rx_buffer
*buffer
;
1248 u32 producer
, handle
;
1249 struct list_head
*head
;
1251 producer
= rds_ring
->producer
;
1252 head
= &rds_ring
->free_list
;
1254 while (!list_empty(head
)) {
1256 buffer
= list_entry(head
->next
, struct qlcnic_rx_buffer
, list
);
1259 if (qlcnic_alloc_rx_skb(adapter
, rds_ring
, buffer
))
1264 list_del(&buffer
->list
);
1266 /* make a rcv descriptor */
1267 pdesc
= &rds_ring
->desc_head
[producer
];
1268 pdesc
->addr_buffer
= cpu_to_le64(buffer
->dma
);
1269 handle
= qlcnic_get_ref_handle(adapter
, buffer
->ref_handle
,
1271 pdesc
->reference_handle
= cpu_to_le16(handle
);
1272 pdesc
->buffer_length
= cpu_to_le32(rds_ring
->dma_size
);
1273 producer
= get_next_index(producer
, rds_ring
->num_desc
);
1277 rds_ring
->producer
= producer
;
1278 writel((producer
-1) & (rds_ring
->num_desc
-1),
1279 rds_ring
->crb_rcv_producer
);
1283 static void dump_skb(struct sk_buff
*skb
, struct qlcnic_adapter
*adapter
)
1286 unsigned char *data
= skb
->data
;
1288 pr_info(KERN_INFO
"\n");
1289 for (i
= 0; i
< skb
->len
; i
++) {
1290 QLCDB(adapter
, DRV
, "%02x ", data
[i
]);
1291 if ((i
& 0x0f) == 8)
1292 pr_info(KERN_INFO
"\n");
1296 static void qlcnic_process_rcv_diag(struct qlcnic_adapter
*adapter
, int ring
,
1299 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1300 struct sk_buff
*skb
;
1301 struct qlcnic_host_rds_ring
*rds_ring
;
1302 int index
, length
, cksum
, pkt_offset
;
1304 if (unlikely(ring
>= adapter
->max_rds_rings
))
1307 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1309 index
= qlcnic_get_sts_refhandle(sts_data0
);
1310 length
= qlcnic_get_sts_totallength(sts_data0
);
1311 if (unlikely(index
>= rds_ring
->num_desc
))
1314 cksum
= qlcnic_get_sts_status(sts_data0
);
1315 pkt_offset
= qlcnic_get_sts_pkt_offset(sts_data0
);
1317 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, cksum
);
1321 if (length
> rds_ring
->skb_size
)
1322 skb_put(skb
, rds_ring
->skb_size
);
1324 skb_put(skb
, length
);
1327 skb_pull(skb
, pkt_offset
);
1329 if (!qlcnic_check_loopback_buff(skb
->data
, adapter
->mac_addr
))
1330 adapter
->ahw
->diag_cnt
++;
1332 dump_skb(skb
, adapter
);
1334 dev_kfree_skb_any(skb
);
1335 adapter
->stats
.rx_pkts
++;
1336 adapter
->stats
.rxbytes
+= length
;
1341 void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring
*sds_ring
)
1343 struct qlcnic_adapter
*adapter
= sds_ring
->adapter
;
1344 struct status_desc
*desc
;
1346 int ring
, opcode
, desc_cnt
;
1348 u32 consumer
= sds_ring
->consumer
;
1350 desc
= &sds_ring
->desc_head
[consumer
];
1351 sts_data0
= le64_to_cpu(desc
->status_desc_data
[0]);
1353 if (!(sts_data0
& STATUS_OWNER_HOST
))
1356 desc_cnt
= qlcnic_get_sts_desc_cnt(sts_data0
);
1357 opcode
= qlcnic_get_sts_opcode(sts_data0
);
1359 case QLCNIC_RESPONSE_DESC
:
1360 qlcnic_handle_fw_message(desc_cnt
, consumer
, sds_ring
);
1363 ring
= qlcnic_get_sts_type(sts_data0
);
1364 qlcnic_process_rcv_diag(adapter
, ring
, sts_data0
);
1368 for (; desc_cnt
> 0; desc_cnt
--) {
1369 desc
= &sds_ring
->desc_head
[consumer
];
1370 desc
->status_desc_data
[0] = cpu_to_le64(STATUS_OWNER_PHANTOM
);
1371 consumer
= get_next_index(consumer
, sds_ring
->num_desc
);
1374 sds_ring
->consumer
= consumer
;
1375 writel(consumer
, sds_ring
->crb_sts_consumer
);
1378 int qlcnic_82xx_napi_add(struct qlcnic_adapter
*adapter
,
1379 struct net_device
*netdev
)
1381 int ring
, max_sds_rings
;
1382 struct qlcnic_host_sds_ring
*sds_ring
;
1383 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1385 if (qlcnic_alloc_sds_rings(recv_ctx
, adapter
->max_sds_rings
))
1388 max_sds_rings
= adapter
->max_sds_rings
;
1390 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1391 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1392 if (ring
== adapter
->max_sds_rings
- 1)
1393 netif_napi_add(netdev
, &sds_ring
->napi
, qlcnic_poll
,
1394 QLCNIC_NETDEV_WEIGHT
/ max_sds_rings
);
1396 netif_napi_add(netdev
, &sds_ring
->napi
, qlcnic_rx_poll
,
1397 QLCNIC_NETDEV_WEIGHT
*2);
1400 if (qlcnic_alloc_tx_rings(adapter
, netdev
)) {
1401 qlcnic_free_sds_rings(recv_ctx
);
1408 void qlcnic_82xx_napi_del(struct qlcnic_adapter
*adapter
)
1411 struct qlcnic_host_sds_ring
*sds_ring
;
1412 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1414 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1415 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1416 netif_napi_del(&sds_ring
->napi
);
1419 qlcnic_free_sds_rings(adapter
->recv_ctx
);
1420 qlcnic_free_tx_rings(adapter
);
1423 void qlcnic_82xx_napi_enable(struct qlcnic_adapter
*adapter
)
1426 struct qlcnic_host_sds_ring
*sds_ring
;
1427 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1429 if (adapter
->is_up
!= QLCNIC_ADAPTER_UP_MAGIC
)
1432 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1433 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1434 napi_enable(&sds_ring
->napi
);
1435 qlcnic_enable_int(sds_ring
);
1439 void qlcnic_82xx_napi_disable(struct qlcnic_adapter
*adapter
)
1442 struct qlcnic_host_sds_ring
*sds_ring
;
1443 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1445 if (adapter
->is_up
!= QLCNIC_ADAPTER_UP_MAGIC
)
1448 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1449 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1450 qlcnic_disable_int(sds_ring
);
1451 napi_synchronize(&sds_ring
->napi
);
1452 napi_disable(&sds_ring
->napi
);
1456 #define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
1457 #define QLC_83XX_LRO_LB_PKT (1ULL << 46)
1459 static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data
, int lro_pkt
)
1462 return (sts_data
& QLC_83XX_LRO_LB_PKT
) ? 1 : 0;
1464 return (sts_data
& QLC_83XX_NORMAL_LB_PKT
) ? 1 : 0;
1467 static struct qlcnic_rx_buffer
*
1468 qlcnic_83xx_process_rcv(struct qlcnic_adapter
*adapter
,
1469 struct qlcnic_host_sds_ring
*sds_ring
,
1470 u8 ring
, u64 sts_data
[])
1472 struct net_device
*netdev
= adapter
->netdev
;
1473 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1474 struct qlcnic_rx_buffer
*buffer
;
1475 struct sk_buff
*skb
;
1476 struct qlcnic_host_rds_ring
*rds_ring
;
1477 int index
, length
, cksum
, is_lb_pkt
;
1478 u16 vid
= 0xffff, t_vid
;
1480 if (unlikely(ring
>= adapter
->max_rds_rings
))
1483 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1485 index
= qlcnic_83xx_hndl(sts_data
[0]);
1486 if (unlikely(index
>= rds_ring
->num_desc
))
1489 buffer
= &rds_ring
->rx_buf_arr
[index
];
1490 length
= qlcnic_83xx_pktln(sts_data
[0]);
1491 cksum
= qlcnic_83xx_csum_status(sts_data
[1]);
1492 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, cksum
);
1496 if (adapter
->drv_mac_learn
&&
1497 (adapter
->flags
& QLCNIC_ESWITCH_ENABLED
)) {
1499 is_lb_pkt
= qlcnic_83xx_is_lb_pkt(sts_data
[1], 0);
1500 qlcnic_add_lb_filter(adapter
, skb
, is_lb_pkt
,
1501 cpu_to_le16(t_vid
));
1504 if (length
> rds_ring
->skb_size
)
1505 skb_put(skb
, rds_ring
->skb_size
);
1507 skb_put(skb
, length
);
1509 if (unlikely(qlcnic_check_rx_tagging(adapter
, skb
, &vid
))) {
1510 adapter
->stats
.rxdropped
++;
1515 skb
->protocol
= eth_type_trans(skb
, netdev
);
1518 __vlan_hwaccel_put_tag(skb
, vid
);
1520 napi_gro_receive(&sds_ring
->napi
, skb
);
1522 adapter
->stats
.rx_pkts
++;
1523 adapter
->stats
.rxbytes
+= length
;
1528 static struct qlcnic_rx_buffer
*
1529 qlcnic_83xx_process_lro(struct qlcnic_adapter
*adapter
,
1530 u8 ring
, u64 sts_data
[])
1532 struct net_device
*netdev
= adapter
->netdev
;
1533 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1534 struct qlcnic_rx_buffer
*buffer
;
1535 struct sk_buff
*skb
;
1536 struct qlcnic_host_rds_ring
*rds_ring
;
1538 struct ipv6hdr
*ipv6h
;
1541 int l2_hdr_offset
, l4_hdr_offset
;
1542 int index
, is_lb_pkt
;
1543 u16 lro_length
, length
, data_offset
, gso_size
;
1544 u16 vid
= 0xffff, t_vid
;
1546 if (unlikely(ring
> adapter
->max_rds_rings
))
1549 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1551 index
= qlcnic_83xx_hndl(sts_data
[0]);
1552 if (unlikely(index
> rds_ring
->num_desc
))
1555 buffer
= &rds_ring
->rx_buf_arr
[index
];
1557 lro_length
= qlcnic_83xx_lro_pktln(sts_data
[0]);
1558 l2_hdr_offset
= qlcnic_83xx_l2_hdr_off(sts_data
[1]);
1559 l4_hdr_offset
= qlcnic_83xx_l4_hdr_off(sts_data
[1]);
1560 push
= qlcnic_83xx_is_psh_bit(sts_data
[1]);
1562 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, STATUS_CKSUM_OK
);
1566 if (adapter
->drv_mac_learn
&&
1567 (adapter
->flags
& QLCNIC_ESWITCH_ENABLED
)) {
1569 is_lb_pkt
= qlcnic_83xx_is_lb_pkt(sts_data
[1], 1);
1570 qlcnic_add_lb_filter(adapter
, skb
, is_lb_pkt
,
1571 cpu_to_le16(t_vid
));
1573 if (qlcnic_83xx_is_tstamp(sts_data
[1]))
1574 data_offset
= l4_hdr_offset
+ QLCNIC_TCP_TS_HDR_SIZE
;
1576 data_offset
= l4_hdr_offset
+ QLCNIC_TCP_HDR_SIZE
;
1578 skb_put(skb
, lro_length
+ data_offset
);
1579 skb_pull(skb
, l2_hdr_offset
);
1581 if (unlikely(qlcnic_check_rx_tagging(adapter
, skb
, &vid
))) {
1582 adapter
->stats
.rxdropped
++;
1587 skb
->protocol
= eth_type_trans(skb
, netdev
);
1588 if (ntohs(skb
->protocol
) == ETH_P_IPV6
) {
1589 ipv6h
= (struct ipv6hdr
*)skb
->data
;
1590 th
= (struct tcphdr
*)(skb
->data
+ sizeof(struct ipv6hdr
));
1592 length
= (th
->doff
<< 2) + lro_length
;
1593 ipv6h
->payload_len
= htons(length
);
1595 iph
= (struct iphdr
*)skb
->data
;
1596 th
= (struct tcphdr
*)(skb
->data
+ (iph
->ihl
<< 2));
1597 length
= (iph
->ihl
<< 2) + (th
->doff
<< 2) + lro_length
;
1598 iph
->tot_len
= htons(length
);
1600 iph
->check
= ip_fast_csum((unsigned char *)iph
, iph
->ihl
);
1606 if (adapter
->flags
& QLCNIC_FW_LRO_MSS_CAP
) {
1607 gso_size
= qlcnic_83xx_get_lro_sts_mss(sts_data
[0]);
1608 skb_shinfo(skb
)->gso_size
= gso_size
;
1609 if (skb
->protocol
== htons(ETH_P_IPV6
))
1610 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
1612 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
1616 __vlan_hwaccel_put_tag(skb
, vid
);
1618 netif_receive_skb(skb
);
1620 adapter
->stats
.lro_pkts
++;
1621 adapter
->stats
.lrobytes
+= length
;
1625 static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring
*sds_ring
,
1628 struct qlcnic_host_rds_ring
*rds_ring
;
1629 struct qlcnic_adapter
*adapter
= sds_ring
->adapter
;
1630 struct list_head
*cur
;
1631 struct status_desc
*desc
;
1632 struct qlcnic_rx_buffer
*rxbuf
= NULL
;
1635 int count
= 0, opcode
;
1636 u32 consumer
= sds_ring
->consumer
;
1638 while (count
< max
) {
1639 desc
= &sds_ring
->desc_head
[consumer
];
1640 sts_data
[1] = le64_to_cpu(desc
->status_desc_data
[1]);
1641 opcode
= qlcnic_83xx_opcode(sts_data
[1]);
1644 sts_data
[0] = le64_to_cpu(desc
->status_desc_data
[0]);
1645 ring
= QLCNIC_FETCH_RING_ID(sts_data
[0]);
1648 case QLC_83XX_REG_DESC
:
1649 rxbuf
= qlcnic_83xx_process_rcv(adapter
, sds_ring
,
1652 case QLC_83XX_LRO_DESC
:
1653 rxbuf
= qlcnic_83xx_process_lro(adapter
, ring
,
1657 dev_info(&adapter
->pdev
->dev
,
1658 "Unkonwn opcode: 0x%x\n", opcode
);
1663 list_add_tail(&rxbuf
->list
, &sds_ring
->free_list
[ring
]);
1665 adapter
->stats
.null_rxbuf
++;
1667 desc
= &sds_ring
->desc_head
[consumer
];
1668 /* Reset the descriptor */
1669 desc
->status_desc_data
[1] = 0;
1670 consumer
= get_next_index(consumer
, sds_ring
->num_desc
);
1673 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
1674 rds_ring
= &adapter
->recv_ctx
->rds_rings
[ring
];
1675 if (!list_empty(&sds_ring
->free_list
[ring
])) {
1676 list_for_each(cur
, &sds_ring
->free_list
[ring
]) {
1677 rxbuf
= list_entry(cur
, struct qlcnic_rx_buffer
,
1679 qlcnic_alloc_rx_skb(adapter
, rds_ring
, rxbuf
);
1681 spin_lock(&rds_ring
->lock
);
1682 list_splice_tail_init(&sds_ring
->free_list
[ring
],
1683 &rds_ring
->free_list
);
1684 spin_unlock(&rds_ring
->lock
);
1686 qlcnic_post_rx_buffers_nodb(adapter
, rds_ring
, ring
);
1689 sds_ring
->consumer
= consumer
;
1690 writel(consumer
, sds_ring
->crb_sts_consumer
);
1695 static int qlcnic_83xx_poll(struct napi_struct
*napi
, int budget
)
1699 struct qlcnic_host_sds_ring
*sds_ring
;
1700 struct qlcnic_adapter
*adapter
;
1701 struct qlcnic_host_tx_ring
*tx_ring
;
1703 sds_ring
= container_of(napi
, struct qlcnic_host_sds_ring
, napi
);
1704 adapter
= sds_ring
->adapter
;
1705 /* tx ring count = 1 */
1706 tx_ring
= adapter
->tx_ring
;
1708 tx_complete
= qlcnic_process_cmd_ring(adapter
, tx_ring
, budget
);
1709 work_done
= qlcnic_83xx_process_rcv_ring(sds_ring
, budget
);
1710 if ((work_done
< budget
) && tx_complete
) {
1711 napi_complete(&sds_ring
->napi
);
1712 qlcnic_83xx_enable_intr(adapter
, sds_ring
);
1718 static int qlcnic_83xx_msix_tx_poll(struct napi_struct
*napi
, int budget
)
1721 struct qlcnic_host_tx_ring
*tx_ring
;
1722 struct qlcnic_adapter
*adapter
;
1724 budget
= QLCNIC_TX_POLL_BUDGET
;
1725 tx_ring
= container_of(napi
, struct qlcnic_host_tx_ring
, napi
);
1726 adapter
= tx_ring
->adapter
;
1727 work_done
= qlcnic_process_cmd_ring(adapter
, tx_ring
, budget
);
1729 napi_complete(&tx_ring
->napi
);
1730 if (test_bit(__QLCNIC_DEV_UP
, &adapter
->state
))
1731 qlcnic_83xx_enable_tx_intr(adapter
, tx_ring
);
1737 static int qlcnic_83xx_rx_poll(struct napi_struct
*napi
, int budget
)
1740 struct qlcnic_host_sds_ring
*sds_ring
;
1741 struct qlcnic_adapter
*adapter
;
1743 sds_ring
= container_of(napi
, struct qlcnic_host_sds_ring
, napi
);
1744 adapter
= sds_ring
->adapter
;
1745 work_done
= qlcnic_83xx_process_rcv_ring(sds_ring
, budget
);
1746 if (work_done
< budget
) {
1747 napi_complete(&sds_ring
->napi
);
1748 if (test_bit(__QLCNIC_DEV_UP
, &adapter
->state
))
1749 qlcnic_83xx_enable_intr(adapter
, sds_ring
);
1755 void qlcnic_83xx_napi_enable(struct qlcnic_adapter
*adapter
)
1758 struct qlcnic_host_sds_ring
*sds_ring
;
1759 struct qlcnic_host_tx_ring
*tx_ring
;
1760 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1762 if (adapter
->is_up
!= QLCNIC_ADAPTER_UP_MAGIC
)
1765 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1766 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1767 napi_enable(&sds_ring
->napi
);
1768 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
)
1769 qlcnic_83xx_enable_intr(adapter
, sds_ring
);
1772 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
) {
1773 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
1774 tx_ring
= &adapter
->tx_ring
[ring
];
1775 napi_enable(&tx_ring
->napi
);
1776 qlcnic_83xx_enable_tx_intr(adapter
, tx_ring
);
1781 void qlcnic_83xx_napi_disable(struct qlcnic_adapter
*adapter
)
1784 struct qlcnic_host_sds_ring
*sds_ring
;
1785 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1786 struct qlcnic_host_tx_ring
*tx_ring
;
1788 if (adapter
->is_up
!= QLCNIC_ADAPTER_UP_MAGIC
)
1791 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1792 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1793 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
)
1794 qlcnic_83xx_disable_intr(adapter
, sds_ring
);
1795 napi_synchronize(&sds_ring
->napi
);
1796 napi_disable(&sds_ring
->napi
);
1799 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
) {
1800 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
1801 tx_ring
= &adapter
->tx_ring
[ring
];
1802 qlcnic_83xx_disable_tx_intr(adapter
, tx_ring
);
1803 napi_synchronize(&tx_ring
->napi
);
1804 napi_disable(&tx_ring
->napi
);
1809 int qlcnic_83xx_napi_add(struct qlcnic_adapter
*adapter
,
1810 struct net_device
*netdev
)
1812 int ring
, max_sds_rings
;
1813 struct qlcnic_host_sds_ring
*sds_ring
;
1814 struct qlcnic_host_tx_ring
*tx_ring
;
1815 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1817 if (qlcnic_alloc_sds_rings(recv_ctx
, adapter
->max_sds_rings
))
1820 max_sds_rings
= adapter
->max_sds_rings
;
1821 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1822 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1823 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
)
1824 netif_napi_add(netdev
, &sds_ring
->napi
,
1825 qlcnic_83xx_rx_poll
,
1826 QLCNIC_NETDEV_WEIGHT
* 2);
1828 netif_napi_add(netdev
, &sds_ring
->napi
,
1830 QLCNIC_NETDEV_WEIGHT
/ max_sds_rings
);
1833 if (qlcnic_alloc_tx_rings(adapter
, netdev
)) {
1834 qlcnic_free_sds_rings(recv_ctx
);
1838 if (adapter
->flags
& QLCNIC_MSIX_ENABLED
) {
1839 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
1840 tx_ring
= &adapter
->tx_ring
[ring
];
1841 netif_napi_add(netdev
, &tx_ring
->napi
,
1842 qlcnic_83xx_msix_tx_poll
,
1843 QLCNIC_NETDEV_WEIGHT
);
1850 void qlcnic_83xx_napi_del(struct qlcnic_adapter
*adapter
)
1853 struct qlcnic_host_sds_ring
*sds_ring
;
1854 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1855 struct qlcnic_host_tx_ring
*tx_ring
;
1857 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
1858 sds_ring
= &recv_ctx
->sds_rings
[ring
];
1859 netif_napi_del(&sds_ring
->napi
);
1862 qlcnic_free_sds_rings(adapter
->recv_ctx
);
1864 if ((adapter
->flags
& QLCNIC_MSIX_ENABLED
)) {
1865 for (ring
= 0; ring
< adapter
->max_drv_tx_rings
; ring
++) {
1866 tx_ring
= &adapter
->tx_ring
[ring
];
1867 netif_napi_del(&tx_ring
->napi
);
1871 qlcnic_free_tx_rings(adapter
);
1874 void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter
*adapter
,
1875 int ring
, u64 sts_data
[])
1877 struct qlcnic_recv_context
*recv_ctx
= adapter
->recv_ctx
;
1878 struct sk_buff
*skb
;
1879 struct qlcnic_host_rds_ring
*rds_ring
;
1882 if (unlikely(ring
>= adapter
->max_rds_rings
))
1885 rds_ring
= &recv_ctx
->rds_rings
[ring
];
1886 index
= qlcnic_83xx_hndl(sts_data
[0]);
1887 if (unlikely(index
>= rds_ring
->num_desc
))
1890 length
= qlcnic_83xx_pktln(sts_data
[0]);
1892 skb
= qlcnic_process_rxbuf(adapter
, rds_ring
, index
, STATUS_CKSUM_OK
);
1896 if (length
> rds_ring
->skb_size
)
1897 skb_put(skb
, rds_ring
->skb_size
);
1899 skb_put(skb
, length
);
1901 if (!qlcnic_check_loopback_buff(skb
->data
, adapter
->mac_addr
))
1902 adapter
->ahw
->diag_cnt
++;
1904 dump_skb(skb
, adapter
);
1906 dev_kfree_skb_any(skb
);
1910 void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring
*sds_ring
)
1912 struct qlcnic_adapter
*adapter
= sds_ring
->adapter
;
1913 struct status_desc
*desc
;
1916 u32 consumer
= sds_ring
->consumer
;
1918 desc
= &sds_ring
->desc_head
[consumer
];
1919 sts_data
[0] = le64_to_cpu(desc
->status_desc_data
[0]);
1920 sts_data
[1] = le64_to_cpu(desc
->status_desc_data
[1]);
1921 opcode
= qlcnic_83xx_opcode(sts_data
[1]);
1925 ring
= QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data
[0]));
1926 qlcnic_83xx_process_rcv_diag(adapter
, ring
, sts_data
);
1927 desc
= &sds_ring
->desc_head
[consumer
];
1928 desc
->status_desc_data
[0] = cpu_to_le64(STATUS_OWNER_PHANTOM
);
1929 consumer
= get_next_index(consumer
, sds_ring
->num_desc
);
1930 sds_ring
->consumer
= consumer
;
1931 writel(consumer
, sds_ring
->crb_sts_consumer
);