/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"
#include "clip_tbl.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");
int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
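/*
 * Editor's note (hypothetical invocation, not part of the original file):
 * the knobs above are ordinary module parameters, so a deployment wanting
 * MPA v1 interop with CRC off and a larger receive window could load the
 * driver as
 *
 *	modprobe iw_cxgb4 mpa_rev=1 crc_enabled=0 rcv_win=1048576
 *
 * and, given the 0644 permissions, read or flip them afterwards under
 * /sys/module/iw_cxgb4/parameters/.
 */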
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}
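/*
 * Worked example for set_emss() above (illustrative numbers only): with a
 * 1500-byte MTU table entry over IPv4, emss = 1500 - 20 (iphdr)
 * - 20 (tcphdr) = 1460.  If TCP timestamps were negotiated, a further
 * round_up(TCPOLEN_TIMESTAMP, 4) = 12 bytes are subtracted, giving 1448.
 */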
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}
static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}
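/*
 * Editor's note: ep_skb_list is a small pool of skbs pre-sized for the
 * largest control WR (union cpl_wr_size), allocated up front so that the
 * close/abort paths can dequeue a buffer instead of allocating in a
 * context where allocation could fail (see the comment in c4iw_reconnect()
 * at the end of this section).
 */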
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	if (idr_is_empty(&ep->com.dev->hwtid_idr))
		wake_up(&ep->com.dev->wait);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}
/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->stid_idr, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (ep->mpa_skb)
		kfree_skb(ep->mpa_skb);

	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	kfree(ep);
}
static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint.  Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}
static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}
static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}
out:
	return dst;
}
static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err(MOD "ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

enum {
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};
static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	return 0;
}
/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}
/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err(MOD "ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}
/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	}
}
static int send_flowc(struct c4iw_ep *ep)
{
	struct fw_flowc_wr *flowc;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	int i;
	u16 vlan = ep->l2t->vlan;
	int nparams;

	if (WARN_ON(!skb))
		return -ENOMEM;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 8;
	else
		nparams = 9;

	flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	if (nparams == 9) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[8].val = cpu_to_be32(pri);
	} else {
		/* Pad WR to 16 byte boundary */
		flowc->mnemval[8].mnemonic = 0;
		flowc->mnemval[8].val = 0;
	}
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int send_halfclose(struct c4iw_ep *ep)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
	req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}
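/*
 * Editor's sketch of the intent (illustrative numbers): for an IPv4 path
 * without timestamps, hdr_size = 20 + 20 = 40, so a 1500-byte MTU leaves
 * data_size = 1460.  cxgb4_best_aligned_mtu() is then asked to pick the
 * MTU table index whose payload best fits an 8-byte alignment, which keeps
 * MPA FPDUs aligned on the wire.
 */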
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb = NULL;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
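	/*
	 * Illustrative arithmetic (editor's note): win is the receive window
	 * in 1KB units, so the default rcv_win of 256KB yields win = 256.
	 * Whatever does not fit in the RCV_BUFSIZ field is clamped here and
	 * later handed back as credits by update_rx_credits().
	 */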
	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			t5req->params = cpu_to_be64(FILTER_TUPLE_V(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req->rsvd);
			t5req->opt2 = cpu_to_be32(opt2);
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid << 14) | ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			t5req6->params = cpu_to_be64(FILTER_TUPLE_V(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd);
			t5req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}
static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));

	mpa->flags = 0;
	if (crc_enabled)
		mpa->flags |= MPA_CRC;
	if (markers_enabled) {
		mpa->flags |= MPA_MARKERS;
		ep->mpa_attr.recv_marker_enabled = 1;
	} else {
		ep->mpa_attr.recv_marker_enabled = 0;
	}
	if (mpa_rev_to_use == 2)
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;

	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
		     ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else {
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	}

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}
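/*
 * MPA v2 wire-format sketch (editor's note; it mirrors the code above, not
 * an independent spec quote): the enhanced-connection trailer carries
 * 16-bit IRD/ORD words.  The sender advertises peer-to-peer mode by setting
 * MPA_V2_PEER2PEER_MODEL in ird and selects the RTR opcode by setting
 * MPA_V2_RDMA_WRITE_RTR or MPA_V2_RDMA_READ_RTR in ord.
 */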
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else {
		if (plen)
			memcpy(mpa->private_data, pdata, plen);
	}

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
*ep
, const void *pdata
, u8 plen
)
1167 struct fw_ofld_tx_data_wr
*req
;
1168 struct mpa_message
*mpa
;
1169 struct sk_buff
*skb
;
1170 struct mpa_v2_conn_params mpa_v2_params
;
1172 PDBG("%s ep %p tid %u pd_len %d\n", __func__
, ep
, ep
->hwtid
, ep
->plen
);
1174 mpalen
= sizeof(*mpa
) + plen
;
1175 if (ep
->mpa_attr
.version
== 2 && ep
->mpa_attr
.enhanced_rdma_conn
)
1176 mpalen
+= sizeof(struct mpa_v2_conn_params
);
1177 wrlen
= roundup(mpalen
+ sizeof *req
, 16);
1179 skb
= get_skb(NULL
, wrlen
, GFP_KERNEL
);
1181 printk(KERN_ERR MOD
"%s - cannot alloc skb!\n", __func__
);
1184 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ep
->txq_idx
);
1186 req
= (struct fw_ofld_tx_data_wr
*) skb_put(skb
, wrlen
);
1187 memset(req
, 0, wrlen
);
1188 req
->op_to_immdlen
= cpu_to_be32(
1189 FW_WR_OP_V(FW_OFLD_TX_DATA_WR
) |
1191 FW_WR_IMMDLEN_V(mpalen
));
1192 req
->flowid_len16
= cpu_to_be32(
1193 FW_WR_FLOWID_V(ep
->hwtid
) |
1194 FW_WR_LEN16_V(wrlen
>> 4));
1195 req
->plen
= cpu_to_be32(mpalen
);
1196 req
->tunnel_to_proxy
= cpu_to_be32(
1197 FW_OFLD_TX_DATA_WR_FLUSH_F
|
1198 FW_OFLD_TX_DATA_WR_SHOVE_F
);
1200 mpa
= (struct mpa_message
*)(req
+ 1);
1201 memset(mpa
, 0, sizeof(*mpa
));
1202 memcpy(mpa
->key
, MPA_KEY_REP
, sizeof(mpa
->key
));
1204 if (ep
->mpa_attr
.crc_enabled
)
1205 mpa
->flags
|= MPA_CRC
;
1206 if (ep
->mpa_attr
.recv_marker_enabled
)
1207 mpa
->flags
|= MPA_MARKERS
;
1208 mpa
->revision
= ep
->mpa_attr
.version
;
1209 mpa
->private_data_size
= htons(plen
);
1211 if (ep
->mpa_attr
.version
== 2 && ep
->mpa_attr
.enhanced_rdma_conn
) {
1212 mpa
->flags
|= MPA_ENHANCED_RDMA_CONN
;
1213 mpa
->private_data_size
= htons(ntohs(mpa
->private_data_size
) +
1214 sizeof (struct mpa_v2_conn_params
));
1215 mpa_v2_params
.ird
= htons((u16
)ep
->ird
);
1216 mpa_v2_params
.ord
= htons((u16
)ep
->ord
);
1217 if (peer2peer
&& (ep
->mpa_attr
.p2p_type
!=
1218 FW_RI_INIT_P2PTYPE_DISABLED
)) {
1219 mpa_v2_params
.ird
|= htons(MPA_V2_PEER2PEER_MODEL
);
1221 if (p2p_type
== FW_RI_INIT_P2PTYPE_RDMA_WRITE
)
1222 mpa_v2_params
.ord
|=
1223 htons(MPA_V2_RDMA_WRITE_RTR
);
1224 else if (p2p_type
== FW_RI_INIT_P2PTYPE_READ_REQ
)
1225 mpa_v2_params
.ord
|=
1226 htons(MPA_V2_RDMA_READ_RTR
);
1229 memcpy(mpa
->private_data
, &mpa_v2_params
,
1230 sizeof(struct mpa_v2_conn_params
));
1233 memcpy(mpa
->private_data
+
1234 sizeof(struct mpa_v2_conn_params
), pdata
, plen
);
1237 memcpy(mpa
->private_data
, pdata
, plen
);
1240 * Reference the mpa skb. This ensures the data area
1241 * will remain in memory until the hw acks the tx.
1242 * Function fw4_ack() will deref it.
1245 t4_set_arp_err_handler(skb
, NULL
, mpa_start_arp_failure
);
1247 __state_set(&ep
->com
, MPA_REP_SENT
);
1248 ep
->snd_seq
+= mpalen
;
1249 return c4iw_l2t_send(&ep
->com
.dev
->rdev
, skb
, ep
->l2t
);
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_ep_tid(ep);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}
static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
				       RX_DACK_CHANGE_F |
				       RX_DACK_MODE_V(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
#define RELAXED_IRD_NEGOTIATION 1
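/*
 * Editor's note: with RELAXED_IRD_NEGOTIATION set, a responder ORD that
 * exceeds our advertised IRD is tolerated; process_mpa_reply() below bumps
 * ep->ird up to the responder's ORD (capped by the adapter's max_ordird_qp)
 * instead of terminating the connection with MPA_INSUFF_IRD.
 */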
/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err_stop_timer;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ird = min_t(u32, ep->ird,
					cur_max_read_depth(ep->com.dev));
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = min_t(u32, ep->ord,
					cur_max_read_depth(ep->com.dev));
			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
			     ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	__state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
	if (ep->parent_ep->com.state != DEAD) {
		if (connect_request_upcall(ep))
			goto err_unlock_parent;
	} else {
		goto err_unlock_parent;
	}
	mutex_unlock(&ep->parent_ep->com.mutex);
	return 0;

err_unlock_parent:
	mutex_unlock(&ep->parent_ep->com.mutex);
	goto err_out;
err_stop_timer:
	(void)stop_ep_timer(ep);
err_out:
	return 2;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;

		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}
static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos >> 2) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Some of the error codes above implicitly indicate that there is no TID
 * allocated with the result of an ACT_OPEN.  We use this predicate to make
 * that explicit.
 */
static inline int act_open_has_tid(int status)
{
	return (status != CPL_ERR_TCAM_PARITY &&
		status != CPL_ERR_TCAM_MISS &&
		status != CPL_ERR_TCAM_FULL &&
		status != CPL_ERR_CONN_EXIST_SYNRECV &&
		status != CPL_ERR_CONN_EXIST);
}
/*
 * Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}
static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}
static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}
#define ACT_OPEN_RETRY_COUNT 2
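/*
 * Resolve the neighbour for this connection, take an L2T entry, and cache
 * the per-port resources (tx channel, smac index, txq/ctrlq/rss queue
 * indices, TCP windows) on the endpoint.  Loopback destinations are
 * mapped back to the real port device first.
 */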
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}

	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}
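/*
 * Re-drive an active open on the same endpoint, e.g. when falling back
 * from MPA v2 to MPA v1.  A fresh atid and route are allocated; on any
 * failure the upper layer gets the -ECONNRESET it is still waiting for
 * from the original connect request.
 */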
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	int size = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_remote_addr;
	int iptype;
	__u8 *ra;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);
	c4iw_init_wr_wait(&ep->com.wr_wait);

	/* When MPA revision is different on nodes, the node with MPA_rev=2
	 * tries to reconnect with MPA_rev 1 for the same EP through
	 * c4iw_reconnect(), where the same EP is assigned with new tid for
	 * further connection establishment. As we are using the same EP pointer
	 * for reconnect, few skbs are used during the previous c4iw_connect(),
	 * which leaves the EP with inadequate skbs for further
	 * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
	 * skb_list() during peer_abort(). Allocate skbs which is already used.
	 */
	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
		err = -ENOMEM;
		goto fail1;
	}

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, ep->com.cm_id->tos);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
			ep->com.dev->rdev.lldi.adapter_type,
			ep->com.cm_id->tos);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = ep->com.cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
fail1:
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
				      ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;
	int ret = 0;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.local_addr;
	ra = (struct sockaddr_in *)&ep->com.remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (is_neg_adv(status)) {
		PDBG("%s Connection problems for atid %u status %u (%s)\n",
		     __func__, atid, status, neg_adv_str(status));
		ep->stats.connect_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
						   ntohl(rpl->atid_status))));
			if (ret)
				goto fail;
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			if (ep->com.remote_addr.ss_family == AF_INET6) {
				struct sockaddr_in6 *sin6 =
						(struct sockaddr_in6 *)
						&ep->com.local_addr;
				cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)
					&sin6->sin6_addr.s6_addr, 1);
			}
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

fail:
	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	c4iw_put_ep(&ep->com);
out:
	return 0;
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	c4iw_put_ep(&ep->com);
	return 0;
}
static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		     struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;
	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
	int win;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));

	skb_get(skb);
	rpl = cplhdr(skb);
	if (!is_t4(adapter_type)) {
		skb_trim(skb, roundup(sizeof(*rpl5), 16));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps && req->tcpopt.tstamp,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
				IP_HDR_LEN_G(hlen);
		else
			tcph = (const void *)(req + 1) +
				T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN_V(1);
	}
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		u32 isn = (prandom_u32() & ~7UL) - 1;
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		rpl5 = (void *)rpl;
		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
		if (peer2peer)
			isn += 4;
		rpl5->iss = cpu_to_be32(isn);
		PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
	}

	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
	return;
}
static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
		       int *iptype, __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		      ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		      T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
		     IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
		     T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;
}
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	struct sockaddr_in6 *sin6;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;
	unsigned short hdrs;
	u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));

	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		PDBG("%s - listening ep not in LISTEN\n", __func__);
		goto reject;
	}

	get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
		   local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4)  {
		PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
				 tos);
	} else {
		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
				  PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
			parent_ep->com.dev->rdev.lldi.adapter_type, tos);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
		child_ep->mtu = peer_mss + hdrs;

	skb_queue_head_init(&child_ep->com.ep_skb_list);
	if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
		goto fail;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;

		sin->sin_family = PF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = tos;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_ep_tid(child_ep);
	if (accept_cr(child_ep, skb, req)) {
		c4iw_put_ep(&parent_ep->com);
		release_ep_resources(child_ep);
	} else {
		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	}
	if (iptype == 6) {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	goto out;
fail:
	c4iw_put_ep(&child_ep->com);
reject:
	reject_cr(dev, hwtid, skb);
out:
	if (parent_ep)
		c4iw_put_ep(&parent_ep->com);
	return 0;
}
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	int ret;

	ep = get_ep_from_tid(dev, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	mutex_lock(&ep->com.mutex);
	ep->com.state = MPA_REQ_WAIT;
	start_ep_timer(ep);
	set_bit(PASS_ESTAB, &ep->com.history);
	ret = send_flowc(ep);
	mutex_unlock(&ep->com.mutex);
	if (ret)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned int tid = GET_TID(req);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		goto deref_ep;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		c4iw_put_ep(&ep->parent_ep->com);
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		goto deref_ep;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
	if (WARN_ON(!rpl_skb)) {
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;
			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

deref_ep:
	c4iw_put_ep(&ep->com);
	/* Dereferencing ep, referenced in peer_abort_intr() */
	c4iw_put_ep(&ep->com);
	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	set_bit(CLOSE_CON_RPL, &ep->com.history);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = get_ep_from_tid(dev, tid);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
	c4iw_put_ep(&ep->com);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		goto out;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		mutex_lock(&ep->com.mutex);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
			stop_ep_timer(ep);
		mutex_unlock(&ep->com.mutex);
	}
out:
	c4iw_put_ep(&ep->com);
	return 0;
}
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int abort;
	struct c4iw_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state != MPA_REQ_RCVD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	if (mpa_rev == 0)
		abort = 1;
	else
		abort = send_mpa_reject(ep, pdata, pdata_len);
	mutex_unlock(&ep->com.mutex);

	stop_ep_timer(ep);
	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
	int abort = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state != MPA_REQ_RCVD) {
		err = -ECONNRESET;
		goto err_out;
	}

	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		err = -EINVAL;
		goto err_abort;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				conn_param->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				err = -ENOMEM;
				goto err_abort;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				err = -ENOMEM;
				goto err_abort;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
			ep->ird = 1;
	}

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.qp = qp;
	ref_qp(ep);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err_deref_cm_id;

	set_bit(STOP_MPA_TIMER, &ep->com.flags);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err_deref_cm_id;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err_deref_cm_id:
	deref_cm_id(&ep->com);
err_abort:
	abort = 1;
err_out:
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return err;
}
static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}
static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}
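/*
 * iw_cm connect handler: allocates the endpoint and its skb reserve, an
 * active TID and a route (picking local addresses for wildcard loopback
 * requests), then issues the TCP active open via send_connect().
 */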
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	__u8 *ra;
	int iptype;

	if ((conn_param->ord > cur_max_read_depth(dev)) ||
	    (conn_param->ird > cur_max_read_depth(dev))) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}

	skb_queue_head_init(&ep->com.ep_skb_list);
	if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
		err = -ENOMEM;
		goto fail1;
	}

	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.dev = dev;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail2;
	}
	ref_qp(ep);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
	       sizeof(ep->com.remote_addr));

	laddr = (struct sockaddr_in *)&ep->com.local_addr;
	raddr = (struct sockaddr_in *)&ep->com.remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;

	if (cm_id->m_remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
		     ra, ntohs(raddr->sin_port));
		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, cm_id->tos);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail2;
		}

		/* find a route */
		PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
		     __func__, laddr6->sin6_addr.s6_addr,
		     ntohs(laddr6->sin6_port),
		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = cm_id->tos;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	skb_queue_purge(&ep->com.ep_skb_list);
	deref_cm_id(&ep->com);
fail1:
	c4iw_put_ep(&ep->com);
out:
	return err;
}
static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &ep->com.local_addr;

	if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
		err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (err)
			return err;
	}
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	else if (err > 0)
		err = net_xmit_errno(err);
	if (err) {
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	}
	return err;
}
static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &ep->com.local_addr;

	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				if (c4iw_fatal_error(&ep->com.dev->rdev)) {
					err = -EIO;
					break;
				}
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
		else if (err > 0)
			err = net_xmit_errno(err);
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
		       , err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	skb_queue_head_init(&ep->com.ep_skb_list);
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->m_local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->m_local_addr.ss_family, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);

	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
	       sizeof(ep->com.local_addr));

	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}

	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		struct sockaddr_in6 *sin6;
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
		sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
	return err;
}
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	/*
	 * Ref the ep here in case we have fatal errors causing the
	 * ep to be released and freed.
	 */
	c4iw_get_ep(&ep->com);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
	case CONNECTING:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;

			/*
			 * if we close before we see the fw4_ack() then we fix
			 * up the timer state since we're reusing it.
			 */
			if (ep->mpa_skb &&
			    test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
				clear_bit(STOP_MPA_TIMER, &ep->com.flags);
				stop_ep_timer(ep);
			}
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep);
		}
		if (ret) {
			set_bit(EP_DISC_FAIL, &ep->com.history);
			if (!abrupt) {
				stop_ep_timer(ep);
				close_complete_upcall(ep, -EIO);
			}
			if (ep->com.qp) {
				struct c4iw_qp_attributes attrs;

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
						     ep->com.qp,
						     C4IW_QP_ATTR_NEXT_STATE,
						     &attrs, 1);
				if (ret)
					printk(KERN_ERR MOD
					       "%s - qp <- error failed!\n",
					       __func__);
			}
			fatal = 1;
		}
	}
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}
static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
	return;
}
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	__be32 l2info;
	__be16 hdr_len, vlantag, len;
	u16 eth_hdr_len;
	int tcp_hdr_len, ip_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;
	enum chip_type type;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = cpl->vlan;
	len = cpl->len;
	l2info  = cpl->l2info;
	hdr_len = cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from SYN packet.
	 * to generate cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
			 SYN_MAC_IDX_V(RX_MACIDX_G(
			 be32_to_cpu(l2info))) |
			 SYN_XACT_MATCH_F);
	type = dev->rdev.lldi.adapter_type;
	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
	req->hdr_len =
		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
		eth_hdr_len = is_t4(type) ?
				RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
				RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
					    IP_HDR_LEN_V(ip_hdr_len) |
					    ETH_HDR_LEN_V(eth_hdr_len));
	} else { /* T6 and later */
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
					    T6_IP_HDR_LEN_V(ip_hdr_len) |
					    T6_ETH_HDR_LEN_V(eth_hdr_len));
	}
	req->vlan = vlantag;
	req->len = len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
				    PASS_OPEN_TOS_V(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
	return;
}
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
			FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
			FW_OFLD_CONNECTION_WR_ASTID_V(
			PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));

	/*
	 * We initialize the MSS index in TCB to 0xF.
	 * So that when driver sends cpl_pass_accept_rpl
	 * TCB picks up the correct value. If this was 0
	 * TP will ignore any value > 0 for MSS index.
	 */
	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
	req->cookie = (uintptr_t)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}
/*
 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
 * messages when a filter is being used instead of server to
 * redirect a syn packet. When packets hit filter they are redirected
 * to the offload queue and driver tries to establish the connection
 * using firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep = NULL;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
	case CHELSIO_T4:
		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T5:
		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T6:
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
		goto reject;
	}

	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate filter portion for LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	if (lep)
		c4iw_put_ep(&lep->com);
	return 0;
}
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt,
	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}
static DECLARE_WORK(skb_work, process_work);
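/*
 * Timer callback: runs in softirq context, so it only marks the endpoint
 * timed out and queues it on timeout_list; process_timedout_eps() does
 * the real work from the workqueue, where sleeping is allowed.
 */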
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	unsigned int tid = GET_TID(req);

	ep = get_ep_from_tid(dev, tid);
	/* This EP will be dereferenced in peer_abort() */
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		goto out;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
out:
	sched(dev, skb);
	return 0;
}
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}
void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}