1 Subject: fix a bug in tag release and sync up cxgb3i with mainline
2 From: Karen Xie <kxie@chelsio.com>
3 References: 464508 - LTC50816
4
5 This patch fixes tag generation and release, and updates the TCP connection offload code to sync up with the current kernel.org version.
6
7 Signed-off-by: Karen Xie <kxie@chelsio.com>
8 Signed-off-by: Olaf Hering <olh@suse.de>
9
10 ---
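
For reference, a minimal userspace sketch of the tag split that the
cxgb3i_parse_tag() hunk below performs: the hardware ddp bits occupy a
reserved field in the middle of the 32-bit tag, and the software bits
(task index + session age) are re-packed from the bits on either side of
it. This is not part of the patch; the shift/width values and the example
tag are made up.

#include <stdio.h>

enum { RSVD_SHIFT = 6, RSVD_BITS = 16 };	/* hypothetical tag layout */

static void parse_tag(unsigned int tag, unsigned int *rsvd, unsigned int *sw)
{
	/* the reserved (ddp) field sits above the low RSVD_SHIFT sw bits */
	*rsvd = (tag >> RSVD_SHIFT) & ((1u << RSVD_BITS) - 1);
	/* sw bits: everything above the reserved field ... */
	*sw = (tag >> (RSVD_SHIFT + RSVD_BITS)) << RSVD_SHIFT;
	/* ... plus the low bits below it */
	*sw |= tag & ((1u << RSVD_SHIFT) - 1);
}

int main(void)
{
	unsigned int rsvd, sw;

	parse_tag(0x00c40025u, &rsvd, &sw);	/* arbitrary example tag */
	printf("rsvd 0x%x, sw 0x%x\n", rsvd, sw);
	return 0;
}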
11 drivers/scsi/cxgb3i/cxgb3i_init.c | 4
12 drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 99 -
13 drivers/scsi/cxgb3i/cxgb3i_offload.c | 2615 +++++++++++++++--------------------
14 drivers/scsi/cxgb3i/cxgb3i_offload.h | 251 +--
15 drivers/scsi/cxgb3i/cxgb3i_ulp2.c | 25
16 drivers/scsi/cxgb3i/cxgb3i_ulp2.h | 23
17 6 files changed, 1429 insertions(+), 1588 deletions(-)
18
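Similarly, a sketch of the per-adapter source-port rotor that the new
c3cn_get_port() below implements (the old code kept one driver-global map
under a spinlock): sport_map_next is only a hint for where to start
scanning, the bitmap itself is authoritative. Not part of the patch;
single-threaded with made-up sizes, where the driver uses
test_and_set_bit() on cdata->sport_map.

#include <stdio.h>

#define SPORT_BASE	20000
#define MAX_CONNECT	64			/* hypothetical map size */

static unsigned char sport_map[MAX_CONNECT];	/* 1 = port index in use */
static unsigned int sport_map_next;		/* rotor hint */

static int get_port(unsigned short *sport)
{
	unsigned int start = sport_map_next, idx = sport_map_next;

	do {
		if (++idx >= MAX_CONNECT)
			idx = 0;
		if (!sport_map[idx]) {	/* test_and_set_bit() in the driver */
			sport_map[idx] = 1;
			sport_map_next = idx;
			*sport = SPORT_BASE + idx;
			return 0;
		}
	} while (idx != start);

	return -1;			/* -EADDRNOTAVAIL in the driver */
}

int main(void)
{
	unsigned short p1, p2;

	if (!get_port(&p1) && !get_port(&p2))
		printf("reserved ports %u and %u\n", p1, p2);
	return 0;
}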
19 --- a/drivers/scsi/cxgb3i/cxgb3i_init.c
20 +++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
21 @@ -12,7 +12,7 @@
22 #include "cxgb3i.h"
23
24 #define DRV_MODULE_NAME "cxgb3i"
25 -#define DRV_MODULE_VERSION "1.0.0"
26 +#define DRV_MODULE_VERSION "0.1.0"
27 #define DRV_MODULE_RELDATE "Jun. 1, 2008"
28
29 static char version[] =
30 @@ -48,7 +48,6 @@ static void open_s3_dev(struct t3cdev *t
31 vers_printed = 1;
32 }
33
34 - cxgb3i_log_debug("open cxgb3 %s.\n", t3dev->name);
35 cxgb3i_sdev_add(t3dev, &t3c_client);
36 cxgb3i_adapter_add(t3dev);
37 }
38 @@ -59,7 +58,6 @@ static void open_s3_dev(struct t3cdev *t
39 */
40 static void close_s3_dev(struct t3cdev *t3dev)
41 {
42 - cxgb3i_log_debug("close cxgb3 %s.\n", t3dev->name);
43 cxgb3i_adapter_remove(t3dev);
44 cxgb3i_sdev_remove(t3dev);
45 }
46 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
47 +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
48 @@ -24,6 +24,18 @@
49 #include "cxgb3i.h"
50 #include "cxgb3i_ulp2.h"
51
52 +#ifdef __DEBUG_CXGB3I_TAG__
53 +#define cxgb3i_tag_debug cxgb3i_log_debug
54 +#else
55 +#define cxgb3i_tag_debug(fmt...)
56 +#endif
57 +
58 +#ifdef __DEBUG_CXGB3I_API__
59 +#define cxgb3i_api_debug cxgb3i_log_debug
60 +#else
61 +#define cxgb3i_api_debug(fmt...)
62 +#endif
63 +
64 #define align_to_4k_boundary(n) \
65 do { \
66 n = (n) & ~((1 << 12) - 1); \
67 @@ -48,7 +60,7 @@ struct cxgb3i_adapter *cxgb3i_adapter_ad
68
69 snic = kzalloc(sizeof(*snic), GFP_KERNEL);
70 if (!snic) {
71 - cxgb3i_log_debug("cxgb3 %s, OOM.\n", t3dev->name);
72 + cxgb3i_api_debug("cxgb3 %s, OOM.\n", t3dev->name);
73 return NULL;
74 }
75
76 @@ -164,7 +176,7 @@ struct cxgb3i_hba *cxgb3i_hba_host_add(s
77 goto pci_dev_put;
78 }
79
80 - cxgb3i_log_debug("shost 0x%p, hba 0x%p, no %u.\n",
81 + cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
82 shost, hba, shost->host_no);
83
84 return hba;
85 @@ -177,7 +189,7 @@ pci_dev_put:
86
87 void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
88 {
89 - cxgb3i_log_debug("shost 0x%p, hba 0x%p, no %u.\n",
90 + cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
91 hba->shost, hba, hba->shost->host_no);
92 iscsi_host_remove(hba->shost);
93 pci_dev_put(hba->snic->pdev);
94 @@ -218,7 +230,7 @@ static struct iscsi_endpoint *cxgb3i_ep_
95 cxgb3i_log_info("NOT going through cxgbi device.\n");
96 goto release_conn;
97 }
98 - if (c3cn_in_state(c3cn, C3CN_STATE_CLOSE)) {
99 + if (c3cn_is_closing(c3cn)) {
100 err = -ENOSPC;
101 cxgb3i_log_info("ep connect unable to connect.\n");
102 goto release_conn;
103 @@ -234,12 +246,12 @@ static struct iscsi_endpoint *cxgb3i_ep_
104 cep->c3cn = c3cn;
105 cep->hba = hba;
106
107 - cxgb3i_log_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
108 + cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
109 ep, cep, c3cn, hba);
110 return ep;
111
112 release_conn:
113 - cxgb3i_log_debug("conn 0x%p failed, release.\n", c3cn);
114 + cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
115 if (c3cn)
116 cxgb3i_c3cn_release(c3cn);
117 return ERR_PTR(err);
118 @@ -257,9 +269,9 @@ static int cxgb3i_ep_poll(struct iscsi_e
119 struct cxgb3i_endpoint *cep = ep->dd_data;
120 struct s3_conn *c3cn = cep->c3cn;
121
122 - if (!c3cn_in_state(c3cn, C3CN_STATE_ESTABLISHED))
123 + if (!c3cn_is_established(c3cn))
124 return 0;
125 - cxgb3i_log_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
126 + cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
127 return 1;
128 }
129
130 @@ -274,7 +286,7 @@ static void cxgb3i_ep_disconnect(struct
131 struct cxgb3i_endpoint *cep = ep->dd_data;
132 struct cxgb3i_conn *cconn = cep->cconn;
133
134 - cxgb3i_log_debug("ep 0x%p, cep 0x%p.\n", ep, cep);
135 + cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);
136
137 if (cconn && cconn->conn) {
138 struct iscsi_tcp_conn *tcp_conn = &cconn->tcp_conn;
139 @@ -291,7 +303,7 @@ static void cxgb3i_ep_disconnect(struct
140 write_unlock_bh(&cep->c3cn->callback_lock);
141 }
142
143 - cxgb3i_log_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
144 + cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
145 ep, cep, cep->c3cn);
146 cxgb3i_c3cn_release(cep->c3cn);
147 iscsi_destroy_endpoint(ep);
148 @@ -325,7 +337,7 @@ cxgb3i_session_create(struct iscsi_endpo
149 cep = ep->dd_data;
150 hba = cep->hba;
151 shost = hba->shost;
152 - cxgb3i_log_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
153 + cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
154 BUG_ON(hba != iscsi_host_priv(shost));
155
156 *host_no = shost->host_no;
157 @@ -364,7 +376,7 @@ remove_session:
158 */
159 static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
160 {
161 - cxgb3i_log_debug("sess 0x%p.\n", cls_session);
162 + cxgb3i_api_debug("sess 0x%p.\n", cls_session);
163 iscsi_r2tpool_free(cls_session->dd_data);
164 iscsi_session_teardown(cls_session);
165 }
166 @@ -380,10 +392,10 @@ static inline int cxgb3i_conn_max_xmit_d
167 {
168 struct cxgb3i_conn *cconn = conn->dd_data;
169 unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_SIZE,
170 - cconn->hba->snic->tx_max_size -
171 - ISCSI_PDU_HEADER_MAX);
172 + cconn->hba->snic->tx_max_size -
173 + ISCSI_PDU_HEADER_MAX);
174
175 - cxgb3i_log_debug("conn 0x%p, max xmit %u.\n",
176 + cxgb3i_api_debug("conn 0x%p, max xmit %u.\n",
177 conn, conn->max_xmit_dlength);
178
179 if (conn->max_xmit_dlength)
180 @@ -394,7 +406,7 @@ static inline int cxgb3i_conn_max_xmit_d
181
182 align_to_4k_boundary(conn->max_xmit_dlength);
183
184 - cxgb3i_log_debug("conn 0x%p, set max xmit %u.\n",
185 + cxgb3i_api_debug("conn 0x%p, set max xmit %u.\n",
186 conn, conn->max_xmit_dlength);
187
188 return 0;
189 @@ -404,18 +416,18 @@ static inline int cxgb3i_conn_max_recv_d
190 {
191 struct cxgb3i_conn *cconn = conn->dd_data;
192 unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_SIZE,
193 - cconn->hba->snic->rx_max_size -
194 - ISCSI_PDU_HEADER_MAX);
195 + cconn->hba->snic->rx_max_size -
196 + ISCSI_PDU_HEADER_MAX);
197
198 - cxgb3i_log_debug("conn 0x%p, max recv %u.\n",
199 + cxgb3i_api_debug("conn 0x%p, max recv %u.\n",
200 conn, conn->max_recv_dlength);
201
202 align_to_4k_boundary(max);
203
204 if (conn->max_recv_dlength) {
205 if (conn->max_recv_dlength > max) {
206 - cxgb3i_log_error("MaxRecvDataSegmentLength %u, not supported."
207 - "Need to be <= %u.\n",
208 + cxgb3i_log_error("MaxRecvDataSegmentLength %u, not "
209 + "supported. Need to be <= %u.\n",
210 conn->max_recv_dlength, max);
211 return -EINVAL;
212 }
213 @@ -425,7 +437,7 @@ static inline int cxgb3i_conn_max_recv_d
214 } else
215 conn->max_recv_dlength = max;
216
217 - cxgb3i_log_debug("conn 0x%p, set max recv %u.\n",
218 + cxgb3i_api_debug("conn 0x%p, set max recv %u.\n",
219 conn, conn->max_recv_dlength);
220
221 return 0;
222 @@ -438,7 +450,7 @@ static struct iscsi_cls_conn *cxgb3i_con
223 struct iscsi_conn *conn;
224 struct cxgb3i_conn *cconn;
225
226 - cxgb3i_log_debug("sess 0x%p, cid %u.\n", cls_session, cid);
227 + cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid);
228
229 cls_conn = iscsi_conn_setup(cls_session, sizeof(*cconn), cid);
230 if (!cls_conn)
231 @@ -495,7 +507,7 @@ static int cxgb3i_conn_bind(struct iscsi
232 if (!ep)
233 return -EINVAL;
234
235 - cxgb3i_log_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
236 + cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
237 ep, cls_session, cls_conn);
238
239 err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
240 @@ -544,7 +556,7 @@ static int cxgb3i_conn_get_param(struct
241 struct iscsi_conn *conn = cls_conn->dd_data;
242 int len;
243
244 - cxgb3i_log_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
245 + cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
246
247 switch (param) {
248 case ISCSI_PARAM_CONN_PORT:
249 @@ -621,15 +633,22 @@ static int cxgb3i_host_set_param(struct
250 {
251 struct cxgb3i_hba *hba = iscsi_host_priv(shost);
252
253 - cxgb3i_log_debug("param %d, buf %s.\n", param, buf);
254 + cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
255
256 - if (hba && param == ISCSI_HOST_PARAM_IPADDRESS) {
257 + switch (param) {
258 + case ISCSI_HOST_PARAM_IPADDRESS:
259 + {
260 __be32 addr = in_aton(buf);
261 cxgb3i_set_private_ipv4addr(hba->ndev, addr);
262 return 0;
263 }
264 -
265 - return iscsi_host_get_param(shost, param, buf);
266 + case ISCSI_HOST_PARAM_HWADDRESS:
267 + case ISCSI_HOST_PARAM_NETDEV_NAME:
268 + /* ignore */
269 + return 0;
270 + default:
271 + return iscsi_host_set_param(shost, param, buf, buflen);
272 + }
273 }
274
275 /**
276 @@ -645,7 +664,7 @@ static int cxgb3i_host_get_param(struct
277 int i;
278 int len = 0;
279
280 - cxgb3i_log_debug("hba %s, param %d.\n", hba->ndev->name, param);
281 + cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
282
283 switch (param) {
284 case ISCSI_HOST_PARAM_HWADDRESS:
285 @@ -720,6 +739,10 @@ static inline void cxgb3i_parse_tag(stru
286 << format->rsvd_shift;
287 *sw_bits |= tag & ((1 << format->rsvd_shift) - 1);
288 }
289 +
290 + cxgb3i_tag_debug("parse tag 0x%x, rsvd 0x%x, sw 0x%x.\n",
291 + tag, rsvd_bits ? *rsvd_bits : 0xFFFFFFFF,
292 + sw_bits ? *sw_bits : 0xFFFFFFFF);
293 }
294
295
296 @@ -735,6 +758,9 @@ static void cxgb3i_parse_itt(struct iscs
297 *idx = sw_bits & ISCSI_ITT_MASK;
298 if (age)
299 *age = (sw_bits >> snic->tag_format.idx_bits) & ISCSI_AGE_MASK;
300 +
301 + cxgb3i_tag_debug("parse itt 0x%x, idx 0x%x, age 0x%x.\n",
302 + itt, idx ? *idx : 0xFFFFF, age ? *age : 0xFF);
303 }
304
305 static int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
306 @@ -762,6 +788,9 @@ static int cxgb3i_reserve_itt(struct isc
307 snic->tag_format.rsvd_shift);
308 *hdr_itt = htonl(tag);
309
310 + cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
311 + tag, *hdr_itt, task->itt, sess->age);
312 +
313 return 0;
314 }
315
316 @@ -771,10 +800,12 @@ static void cxgb3i_release_itt(struct is
317 struct iscsi_conn *conn = task->conn;
318 struct cxgb3i_conn *cconn = conn->dd_data;
319 struct cxgb3i_adapter *snic = cconn->hba->snic;
320 + u32 tag = ntohl(hdr_itt);
321 +
322 + cxgb3i_tag_debug("release tag 0x%x.\n", tag);
323
324 - hdr_itt = ntohl(hdr_itt);
325 if (sc && (sc->sc_data_direction == DMA_FROM_DEVICE))
326 - cxgb3i_ddp_tag_release(snic, hdr_itt,
327 + cxgb3i_ddp_tag_release(snic, tag,
328 scsi_in(sc)->table.sgl,
329 scsi_in(sc)->table.nents);
330 }
331 @@ -871,14 +902,14 @@ int cxgb3i_iscsi_init(void)
332 cxgb3i_log_error("Could not register cxgb3i transport.\n");
333 return -ENODEV;
334 }
335 - cxgb3i_log_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
336 + cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
337 return 0;
338 }
339
340 void cxgb3i_iscsi_cleanup(void)
341 {
342 if (cxgb3i_scsi_transport) {
343 - cxgb3i_log_debug("cxgb3i transport 0x%p.\n",
344 + cxgb3i_api_debug("cxgb3i transport 0x%p.\n",
345 cxgb3i_scsi_transport);
346 iscsi_unregister_transport(&cxgb3i_iscsi_transport);
347 }
348 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
349 +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
350 @@ -1,12 +1,15 @@
351 /*
352 - * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
353 + * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
354 *
355 - * Written by Dimitris Michailidis (dm@chelsio.com)
356 + * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
357 *
358 * This program is distributed in the hope that it will be useful, but WITHOUT
359 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
360 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
361 * release for licensing terms and conditions.
362 + *
363 + * Written by: Dimitris Michailidis (dm@chelsio.com)
364 + * Karen Xie (kxie@chelsio.com)
365 */
366
367 #include <linux/if_vlan.h>
368 @@ -18,6 +21,27 @@
369 #include "cxgb3i_offload.h"
370 #include "cxgb3i_ulp2.h"
371
372 +#ifdef __DEBUG_C3CN_CONN__
373 +#define c3cn_conn_debug cxgb3i_log_debug
374 +#else
375 +#define c3cn_conn_debug(fmt...)
376 +#endif
377 +
378 +#ifdef __DEBUG_C3CN_TX__
379 +#define c3cn_tx_debug cxgb3i_log_debug
380 +#else
381 +#define c3cn_tx_debug(fmt...)
382 +#endif
383 +
384 +#ifdef __DEBUG_C3CN_RX__
385 +#define c3cn_rx_debug cxgb3i_log_debug
386 +#else
387 +#define c3cn_rx_debug(fmt...)
388 +#endif
389 +
390 +/*
391 + * module parameters related to offloaded iscsi connections
392 + */
393 static int cxgb3_rcv_win = 256 * 1024;
394 module_param(cxgb3_rcv_win, int, 0644);
395 MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
396 @@ -39,30 +63,91 @@ static unsigned int cxgb3_sport_base = 2
397 module_param(cxgb3_sport_base, uint, 0644);
398 MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)");
399
400 -#ifdef __DEBUG_C3CN_TX__
401 -#define c3cn_tx_debug cxgb3i_log_debug
402 -#else
403 -#define c3cn_tx_debug(fmt...)
404 -#endif
405 +/*
406 + * cxgb3i tcp connection data (per adapter) list
407 + */
408 +static LIST_HEAD(cdata_list);
409 +static DEFINE_RWLOCK(cdata_rwlock);
410
411 -#ifdef __DEBUG_C3CN_RX__
412 -#define c3cn_rx_debug cxgb3i_log_debug
413 -#else
414 -#define c3cn_rx_debug(fmt...)
415 -#endif
416 +static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
417 +static void c3cn_release_offload_resources(struct s3_conn *c3cn);
418 +
419 +/*
420 + * iscsi source port management
421 + *
422 + * Find a free source port in the port allocation map. We use a very simple
423 + * rotor scheme to look for the next free port.
424 + *
425 + * If a source port has been specified, make sure that it doesn't collide with
426 + * our normal source port allocation map. If it's outside the range of our
427 + * allocation/deallocation scheme, just let the caller use it.
428 + *
429 + * If the source port is outside our allocation range, the caller is
430 + * responsible for keeping track of their port usage.
431 + */
432 +static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
433 +{
434 + unsigned int start;
435 + int idx;
436 +
437 + if (!cdata)
438 + goto error_out;
439 +
440 + if (c3cn->saddr.sin_port != 0) {
441 + idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
442 + if (idx < 0 || idx >= cxgb3_max_connect)
443 + return 0;
444 + if (!test_and_set_bit(idx, cdata->sport_map))
445 + return -EADDRINUSE;
446 + }
447 +
448 + /* the sport_map_next may not be accurate, but that is okay; sport_map
449 + should be */
450 + start = idx = cdata->sport_map_next;
451 + do {
452 + if (++idx >= cxgb3_max_connect)
453 + idx = 0;
454 + if (!(test_and_set_bit(idx, cdata->sport_map))) {
455 + c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
456 + cdata->sport_map_next = idx;
457 + c3cn_conn_debug("%s reserve port %u.\n",
458 + cdata->cdev->name,
459 + cxgb3_sport_base + idx);
460 + return 0;
461 + }
462 + } while (idx != start);
463 +
464 +error_out:
465 + return -EADDRNOTAVAIL;
466 +}
467 +
468 +static void c3cn_put_port(struct s3_conn *c3cn)
469 +{
470 + struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
471 +
472 + if (c3cn->saddr.sin_port) {
473 + int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
474 +
475 + c3cn->saddr.sin_port = 0;
476 + if (idx < 0 || idx >= cxgb3_max_connect)
477 + return;
478 + clear_bit(idx, cdata->sport_map);
479 + c3cn_conn_debug("%s, release port %u.\n",
480 + cdata->cdev->name, cxgb3_sport_base + idx);
481 + }
482 +}
483
484 -/* connection flags */
485 static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
486 {
487 __set_bit(flag, &c3cn->flags);
488 - c3cn_conn_debug("c3cn 0x%p, set %d, s 0x%x, f 0x%lx.\n",
489 + c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
490 c3cn, flag, c3cn->state, c3cn->flags);
491 }
492
493 -static inline void c3cn_reset_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
494 +static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
495 {
496 __clear_bit(flag, &c3cn->flags);
497 - c3cn_conn_debug("c3cn 0x%p, clear %d, s 0x%x, f 0x%lx.\n",
498 + c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
499 c3cn, flag, c3cn->state, c3cn->flags);
500 }
501
502 @@ -73,14 +158,12 @@ static inline int c3cn_flag(struct s3_co
503 return test_bit(flag, &c3cn->flags);
504 }
505
506 -/* connection state */
507 static void c3cn_set_state(struct s3_conn *c3cn, int state)
508 {
509 - c3cn_conn_debug("c3cn 0x%p state -> 0x%x.\n", c3cn, state);
510 + c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
511 c3cn->state = state;
512 }
513
514 -/* connection reference count */
515 static inline void c3cn_hold(struct s3_conn *c3cn)
516 {
517 atomic_inc(&c3cn->refcnt);
518 @@ -89,432 +172,316 @@ static inline void c3cn_hold(struct s3_c
519 static inline void c3cn_put(struct s3_conn *c3cn)
520 {
521 if (atomic_dec_and_test(&c3cn->refcnt)) {
522 - c3cn_conn_debug("free c3cn 0x%p, 0x%x, 0x%lx.\n",
523 + c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
524 c3cn, c3cn->state, c3cn->flags);
525 kfree(c3cn);
526 }
527 }
528
529 -/* minimal port allocation management scheme */
530 -static spinlock_t sport_map_lock;
531 -static unsigned int sport_map_next;
532 -static unsigned long *sport_map;
533 +static void c3cn_closed(struct s3_conn *c3cn)
534 +{
535 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
536 + c3cn, c3cn->state, c3cn->flags);
537 +
538 + c3cn_put_port(c3cn);
539 + c3cn_release_offload_resources(c3cn);
540 + c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
541 + cxgb3i_conn_closing(c3cn);
542 +}
543 +
544 +/*
545 + * CPL (Chelsio Protocol Language) defines a message passing interface between
546 + * the host driver and the T3 ASIC.
547 + * The section below implements the CPLs related to iscsi tcp connection
548 + * open/close/abort and data send/receive.
549 + */
550
551 /*
552 - * Find a free source port in our allocation map. We use a very simple rotor
553 - * scheme to look for the next free port.
554 - *
555 - * If a source port has been specified make sure that it doesn't collide with
556 - * our normal source port allocation map. If it's outside the range of our
557 - * allocation scheme just let them use it.
558 + * CPL connection active open request: host ->
559 */
560 -static int c3cn_get_port(struct s3_conn *c3cn)
561 +static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
562 {
563 - unsigned int start;
564 + int i = 0;
565
566 - if (!sport_map)
567 - goto error_out;
568 + while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
569 + ++i;
570 + return i;
571 +}
572
573 - if (c3cn->saddr.sin_port != 0) {
574 - int sport = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
575 - int err = 0;
576 +static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
577 +{
578 + unsigned int idx;
579 + struct dst_entry *dst = c3cn->dst_cache;
580 + struct t3cdev *cdev = c3cn->cdev;
581 + const struct t3c_data *td = T3C_DATA(cdev);
582 + u16 advmss = dst_metric(dst, RTAX_ADVMSS);
583
584 - if (sport < 0 || sport >= cxgb3_max_connect)
585 - return 0;
586 - spin_lock(&sport_map_lock);
587 - err = __test_and_set_bit(sport, sport_map);
588 - spin_unlock(&sport_map_lock);
589 - return err ? -EADDRINUSE : 0;
590 - }
591 + if (advmss > pmtu - 40)
592 + advmss = pmtu - 40;
593 + if (advmss < td->mtus[0] - 40)
594 + advmss = td->mtus[0] - 40;
595 + idx = find_best_mtu(td, advmss + 40);
596 + return idx;
597 +}
598
599 - spin_lock(&sport_map_lock);
600 - start = sport_map_next;
601 - do {
602 - unsigned int new = sport_map_next;
603 - if (++sport_map_next >= cxgb3_max_connect)
604 - sport_map_next = 0;
605 - if (!(__test_and_set_bit(new, sport_map))) {
606 - spin_unlock(&sport_map_lock);
607 - c3cn_conn_debug("reserve port %u.\n",
608 - cxgb3_sport_base + new);
609 - c3cn->saddr.sin_port = htons(cxgb3_sport_base + new);
610 - return 0;
611 - }
612 - } while (sport_map_next != start);
613 - spin_unlock(&sport_map_lock);
614 +static inline int compute_wscale(int win)
615 +{
616 + int wscale = 0;
617 + while (wscale < 14 && (65535<<wscale) < win)
618 + wscale++;
619 + return wscale;
620 +}
621
622 -error_out:
623 - return -EADDRNOTAVAIL;
624 +static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
625 +{
626 + int wscale = compute_wscale(cxgb3_rcv_win);
627 + return V_KEEP_ALIVE(1) |
628 + F_TCAM_BYPASS |
629 + V_WND_SCALE(wscale) |
630 + V_MSS_IDX(c3cn->mss_idx);
631 }
632
633 -/*
634 - * Deallocate a source port from the allocation map. If the source port is
635 - * outside our allocation range just return -- the caller is responsible for
636 - * keeping track of their port usage outside of our allocation map.
637 - */
638 -static void c3cn_put_port(struct s3_conn *c3cn)
639 +static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
640 {
641 - if (c3cn->saddr.sin_port) {
642 - int old = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
643 - c3cn->saddr.sin_port = 0;
644 + return V_ULP_MODE(ULP_MODE_ISCSI) |
645 + V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
646 +}
647
648 - if (old < 0 || old >= cxgb3_max_connect)
649 - return;
650 +static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
651 + unsigned int atid, const struct l2t_entry *e)
652 +{
653 + struct cpl_act_open_req *req;
654
655 - c3cn_conn_debug("release port %u.\n", cxgb3_sport_base + old);
656 - spin_lock(&sport_map_lock);
657 - __clear_bit(old, sport_map);
658 - spin_unlock(&sport_map_lock);
659 - }
660 + c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);
661 +
662 + skb->priority = CPL_PRIORITY_SETUP;
663 + req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
664 + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
665 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
666 + req->local_port = c3cn->saddr.sin_port;
667 + req->peer_port = c3cn->daddr.sin_port;
668 + req->local_ip = c3cn->saddr.sin_addr.s_addr;
669 + req->peer_ip = c3cn->daddr.sin_addr.s_addr;
670 + req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
671 + V_TX_CHANNEL(e->smt_idx));
672 + req->opt0l = htonl(calc_opt0l(c3cn));
673 + req->params = 0;
674 }
675
676 -static void c3cn_reset_timer(struct s3_conn *c3cn, struct timer_list *timer,
677 - unsigned long expires)
678 +static void fail_act_open(struct s3_conn *c3cn, int errno)
679 {
680 - if (!mod_timer(timer, expires))
681 - c3cn_hold(c3cn);
682 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
683 + c3cn, c3cn->state, c3cn->flags);
684 + c3cn->err = errno;
685 + c3cn_closed(c3cn);
686 }
687
688 -typedef int (cxgb3_cpl_handler_decl) (struct t3cdev *,
689 - struct sk_buff *, void *);
690 -
691 -static cxgb3_cpl_handler_decl do_act_establish;
692 -static cxgb3_cpl_handler_decl do_act_open_rpl;
693 -static cxgb3_cpl_handler_decl do_wr_ack;
694 -static cxgb3_cpl_handler_decl do_peer_close;
695 -static cxgb3_cpl_handler_decl do_abort_req;
696 -static cxgb3_cpl_handler_decl do_abort_rpl;
697 -static cxgb3_cpl_handler_decl do_close_con_rpl;
698 -static cxgb3_cpl_handler_decl do_iscsi_hdr;
699 +static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
700 +{
701 + struct s3_conn *c3cn = (struct s3_conn *)skb->sk;
702
703 -static LIST_HEAD(cxgb3_list);
704 -static DEFINE_MUTEX(cxgb3_list_lock);
705 + c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
706
707 -/*
708 - * For ULP connections HW may inserts digest bytes into the pdu. This array
709 - * contains the compensating extra lengths for ULP packets. It is indexed by
710 - * a packet's ULP submode.
711 - */
712 -static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };
713 + c3cn_hold(c3cn);
714 + spin_lock_bh(&c3cn->lock);
715 + if (c3cn->state == C3CN_STATE_CONNECTING)
716 + fail_act_open(c3cn, EHOSTUNREACH);
717 + spin_unlock_bh(&c3cn->lock);
718 + c3cn_put(c3cn);
719 + __kfree_skb(skb);
720 +}
721
722 /*
723 - * Return the length of any HW additions that will be made to a Tx packet.
724 - * Such additions can happen for some types of ULP packets.
725 + * CPL connection close request: host ->
726 + *
727 + * Close a connection by sending a CPL_CLOSE_CON_REQ message and queueing it
728 + * on the write queue (i.e., after any unsent tx data).
729 */
730 -static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
731 +static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
732 + int flags)
733 {
734 - return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
735 + CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
736 + CXGB3_SKB_CB(skb)->flags = flags;
737 + __skb_queue_tail(&c3cn->write_queue, skb);
738 }
739
740 -/*
741 - * Size of WRs in bytes. Note that we assume all devices we are handling have
742 - * the same WR size.
743 - */
744 -static unsigned int wrlen __read_mostly;
745 -
746 -/*
747 - * The number of WRs needed for an skb depends on the number of page fragments
748 - * in the skb and whether it has any payload in its main body. This maps the
749 - * length of the gather list represented by an skb into the # of necessary WRs.
750 - */
751 -#define SKB_WR_LIST_SIZE (16384/512 + 1)
752 -static unsigned int skb_wrs[SKB_WR_LIST_SIZE + 2] __read_mostly;
753 -
754 -static void s3_init_wr_tab(unsigned int wr_len)
755 +static void send_close_req(struct s3_conn *c3cn)
756 {
757 - int i;
758 + struct sk_buff *skb = c3cn->cpl_close;
759 + struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
760 + unsigned int tid = c3cn->tid;
761
762 - if (skb_wrs[1]) /* already initialized */
763 - return;
764 + c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
765 + c3cn, c3cn->state, c3cn->flags);
766
767 - for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
768 - int sgl_len = (3 * i) / 2 + (i & 1);
769 + c3cn->cpl_close = NULL;
770
771 - sgl_len += 3;
772 - skb_wrs[i] = (sgl_len <= wr_len
773 - ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
774 - }
775 + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
776 + req->wr.wr_lo = htonl(V_WR_TID(tid));
777 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
778 + req->rsvd = htonl(c3cn->write_seq);
779
780 - wrlen = wr_len * 8;
781 + skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
782 + if (c3cn->state != C3CN_STATE_CONNECTING)
783 + c3cn_push_tx_frames(c3cn, 1);
784 }
785
786 /*
787 - * cxgb3i API operations.
788 - */
789 -/*
790 - * large memory chunk allocation/release
791 + * CPL connection abort request: host ->
792 + *
793 + * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
794 + * for the same connection and also that we do not try to send a message
795 + * after the connection has closed.
796 */
797 -void *cxgb3i_alloc_big_mem(unsigned int size)
798 +static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
799 {
800 - void *p = kmalloc(size, GFP_KERNEL);
801 - if (!p)
802 - p = vmalloc(size);
803 - if (p)
804 - memset(p, 0, size);
805 - return p;
806 -}
807 + struct cpl_abort_req *req = cplhdr(skb);
808
809 -void cxgb3i_free_big_mem(void *addr)
810 -{
811 - if (is_vmalloc_addr(addr))
812 - vfree(addr);
813 - else
814 - kfree(addr);
815 -}
816 + c3cn_conn_debug("tdev 0x%p.\n", cdev);
817
818 -void cxgb3i_sdev_cleanup(void)
819 -{
820 - if (sport_map)
821 - cxgb3i_free_big_mem(sport_map);
822 + req->cmd = CPL_ABORT_NO_RST;
823 + cxgb3_ofld_send(cdev, skb);
824 }
825
826 -int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
827 +static inline void c3cn_purge_write_queue(struct s3_conn *c3cn)
828 {
829 - cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
830 - cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
831 - cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
832 - cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
833 - cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
834 - cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
835 - cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
836 - cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;
837 + struct sk_buff *skb;
838
839 - if (cxgb3_max_connect > CXGB3I_MAX_CONN)
840 - cxgb3_max_connect = CXGB3I_MAX_CONN;
841 - sport_map = cxgb3i_alloc_big_mem(DIV_ROUND_UP(cxgb3_max_connect,
842 - 8 *
843 - sizeof(unsigned long)));
844 - if (!sport_map)
845 - return -ENOMEM;
846 - return 0;
847 + while ((skb = __skb_dequeue(&c3cn->write_queue)))
848 + __kfree_skb(skb);
849 }
850
851 -void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
852 +static void send_abort_req(struct s3_conn *c3cn)
853 {
854 - struct cxgb3i_sdev_data *cdata;
855 - struct adap_ports *ports;
856 - struct ofld_page_info rx_page_info;
857 - unsigned int wr_len;
858 - int i;
859 + struct sk_buff *skb = c3cn->cpl_abort_req;
860 + struct cpl_abort_req *req;
861 + unsigned int tid = c3cn->tid;
862
863 - cdata = kzalloc(sizeof *cdata, GFP_KERNEL);
864 - if (!cdata)
865 + if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
866 + !c3cn->cdev)
867 return;
868 - ports = kzalloc(sizeof *ports, GFP_KERNEL);
869 - if (!ports)
870 - goto free_ports;
871 - cdata->ports = ports;
872 -
873 - if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
874 - cdev->ctl(cdev, GET_PORTS, cdata->ports) < 0 ||
875 - cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0)
876 - goto free_ports;
877
878 - s3_init_wr_tab(wr_len);
879 + c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
880
881 - INIT_LIST_HEAD(&cdata->list);
882 - cdata->cdev = cdev;
883 - cdata->client = client;
884 - cdata->rx_page_size = rx_page_info.page_size;
885 - skb_queue_head_init(&cdata->deferq);
886 + c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);
887
888 - for (i = 0; i < ports->nports; i++)
889 - NDEV2CDATA(ports->lldevs[i]) = cdata;
890 + c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);
891
892 - mutex_lock(&cxgb3_list_lock);
893 - list_add_tail(&cdata->list, &cxgb3_list);
894 - mutex_unlock(&cxgb3_list_lock);
895 + /* Purge the send queue so we don't send anything after an abort. */
896 + c3cn_purge_write_queue(c3cn);
897
898 - return;
899 + c3cn->cpl_abort_req = NULL;
900 + req = (struct cpl_abort_req *)skb->head;
901
902 -free_ports:
903 - kfree(ports);
904 - kfree(cdata);
905 -}
906 + skb->priority = CPL_PRIORITY_DATA;
907 + set_arp_failure_handler(skb, abort_arp_failure);
908
909 -void cxgb3i_sdev_remove(struct t3cdev *cdev)
910 -{
911 - struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
912 - struct adap_ports *ports = cdata->ports;
913 - int i;
914 -
915 - for (i = 0; i < ports->nports; i++)
916 - NDEV2CDATA(ports->lldevs[i]) = NULL;
917 -
918 - mutex_lock(&cxgb3_list_lock);
919 - list_del(&cdata->list);
920 - mutex_unlock(&cxgb3_list_lock);
921 + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
922 + req->wr.wr_lo = htonl(V_WR_TID(tid));
923 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
924 + req->rsvd0 = htonl(c3cn->snd_nxt);
925 + req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
926 + req->cmd = CPL_ABORT_SEND_RST;
927
928 - kfree(ports);
929 - kfree(cdata);
930 + l2t_send(c3cn->cdev, skb, c3cn->l2t);
931 }
932
933 /*
934 - * Return TRUE if the specified net device is for a port on one of our
935 - * registered adapters.
936 + * CPL connection abort reply: host ->
937 + *
938 + * Send an ABORT_RPL message in response to the ABORT_REQ received.
939 */
940 -static int is_cxgb3_dev(struct net_device *dev)
941 +static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
942 {
943 - struct cxgb3i_sdev_data *cdata;
944 + struct sk_buff *skb = c3cn->cpl_abort_rpl;
945 + struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
946
947 - mutex_lock(&cxgb3_list_lock);
948 - list_for_each_entry(cdata, &cxgb3_list, list) {
949 - struct adap_ports *ports = cdata->ports;
950 - int i;
951 + c3cn->cpl_abort_rpl = NULL;
952
953 - for (i = 0; i < ports->nports; i++)
954 - if (dev == ports->lldevs[i]) {
955 - mutex_unlock(&cxgb3_list_lock);
956 - return 1;
957 - }
958 - }
959 - mutex_unlock(&cxgb3_list_lock);
960 - return 0;
961 + skb->priority = CPL_PRIORITY_DATA;
962 + rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
963 + rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
964 + OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
965 + rpl->cmd = rst_status;
966 +
967 + cxgb3_ofld_send(c3cn->cdev, skb);
968 }
969
970 /*
971 - * Primary cxgb3 API operations.
972 - * =============================
973 + * CPL connection rx data ack: host ->
974 + * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
975 + * credits sent.
976 */
977 -
978 -static int s3_push_frames(struct s3_conn *, int);
979 -static int s3_send_reset(struct s3_conn *, int, struct sk_buff *);
980 -static void t3_release_offload_resources(struct s3_conn *);
981 -static void mk_close_req(struct s3_conn *);
982 -
983 -struct s3_conn *cxgb3i_c3cn_create(void)
984 +static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
985 {
986 - struct s3_conn *c3cn;
987 -
988 - c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
989 - if (c3cn == NULL)
990 - return NULL;
991 -
992 - c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);
993 + struct sk_buff *skb;
994 + struct cpl_rx_data_ack *req;
995
996 - c3cn->flags = 0;
997 - spin_lock_init(&c3cn->lock);
998 - atomic_set(&c3cn->refcnt, 1);
999 - skb_queue_head_init(&c3cn->receive_queue);
1000 - skb_queue_head_init(&c3cn->write_queue);
1001 - setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
1002 - rwlock_init(&c3cn->callback_lock);
1003 + skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
1004 + if (!skb)
1005 + return 0;
1006
1007 - return c3cn;
1008 + req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
1009 + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1010 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
1011 + req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
1012 + skb->priority = CPL_PRIORITY_ACK;
1013 + cxgb3_ofld_send(c3cn->cdev, skb);
1014 + return credits;
1015 }
1016
1017 -static inline void s3_purge_write_queue(struct s3_conn *c3cn)
1018 -{
1019 - struct sk_buff *skb;
1020 -
1021 - while ((skb = __skb_dequeue(&c3cn->write_queue)))
1022 - __kfree_skb(skb);
1023 -}
1024 +/*
1025 + * CPL connection tx data: host ->
1026 + *
1027 + * Send an iscsi PDU via a TX_DATA CPL message. Returns the number of
1028 + * credits sent.
1029 + * Each TX_DATA consumes work request credits (wrs), so we need to keep track of
1030 + * how many we've used so far and how many are pending (i.e., not yet acked by T3).
1031 + */
1032
1033 -static void c3cn_done(struct s3_conn *c3cn)
1034 +/*
1035 + * For ULP connections HW may insert digest bytes into the pdu. Those digest
1036 + * bytes are not sent by the host but are part of the TCP payload and therefore
1037 + * consume TCP sequence space.
1038 + */
1039 +static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };
1040 +static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
1041 {
1042 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
1043 - c3cn, c3cn->state, c3cn->flags);
1044 -
1045 - c3cn_put_port(c3cn);
1046 - t3_release_offload_resources(c3cn);
1047 - c3cn_set_state(c3cn, C3CN_STATE_CLOSE);
1048 - c3cn->shutdown = C3CN_SHUTDOWN_MASK;
1049 - cxgb3i_conn_closing(c3cn);
1050 + return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
1051 }
1052
1053 -static void c3cn_close(struct s3_conn *c3cn)
1054 -{
1055 - int data_lost, old_state;
1056 -
1057 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
1058 - c3cn, c3cn->state, c3cn->flags);
1059 -
1060 - dst_confirm(c3cn->dst_cache);
1061 -
1062 - spin_lock_bh(&c3cn->lock);
1063 - c3cn->shutdown |= C3CN_SHUTDOWN_MASK;
1064 -
1065 - /*
1066 - * We need to flush the receive buffs. We do this only on the
1067 - * descriptor close, not protocol-sourced closes, because the
1068 - * reader process may not have drained the data yet! Make a note
1069 - * of whether any received data will be lost so we can decide whether
1070 - * to FIN or RST.
1071 - */
1072 - data_lost = skb_queue_len(&c3cn->receive_queue);
1073 - __skb_queue_purge(&c3cn->receive_queue);
1074 -
1075 - if (c3cn->state == C3CN_STATE_CLOSE)
1076 - /* Nothing if we are already closed */
1077 - c3cn_conn_debug("c3cn 0x%p, 0x%x, already closed.\n",
1078 - c3cn, c3cn->state);
1079 - else if (data_lost || c3cn->state == C3CN_STATE_SYN_SENT) {
1080 - c3cn_conn_debug("c3cn 0x%p, 0x%x -> closing, send reset.\n",
1081 - c3cn, c3cn->state);
1082 - /* Unread data was tossed, zap the connection. */
1083 - s3_send_reset(c3cn, CPL_ABORT_SEND_RST, NULL);
1084 - goto unlock;
1085 - } else if (c3cn->state == C3CN_STATE_ESTABLISHED) {
1086 - c3cn_conn_debug("c3cn 0x%p, est. -> closing, send close_req.\n",
1087 - c3cn);
1088 - c3cn_set_state(c3cn, C3CN_STATE_CLOSING);
1089 - mk_close_req(c3cn);
1090 - }
1091 -
1092 -unlock:
1093 - old_state = c3cn->state;
1094 - c3cn_hold(c3cn); /* must last past the potential destroy() */
1095 +static unsigned int wrlen __read_mostly;
1096
1097 - spin_unlock_bh(&c3cn->lock);
1098 +/*
1099 + * The number of WRs needed for an skb depends on the number of fragments
1100 + * in the skb and whether it has any payload in its main body. This maps the
1101 + * length of the gather list represented by an skb into the # of necessary WRs.
1102 + *
1103 + * The max. length of an skb is controlled by the max pdu size, which is ~16K.
1104 + * Also, assume the min. fragment length is the sector size (512), then add
1105 + * extra fragment counts for iscsi bhs and payload padding.
1106 + */
1107 +#define SKB_WR_LIST_SIZE (16384/512 + 3)
1108 +static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
1109
1110 - /*
1111 - * There are no more user references at this point. Grab the
1112 - * connection lock and finish the close.
1113 - */
1114 - local_bh_disable();
1115 - spin_lock(&c3cn->lock);
1116 +static void s3_init_wr_tab(unsigned int wr_len)
1117 +{
1118 + int i;
1119
1120 - /*
1121 - * Because the connection was orphaned before the spin_lock()
1122 - * either the backlog or a BH may have already destroyed it.
1123 - * Bail out if so.
1124 - */
1125 - if (old_state != C3CN_STATE_CLOSE && c3cn->state == C3CN_STATE_CLOSE)
1126 - goto out;
1127 + if (skb_wrs[1]) /* already initialized */
1128 + return;
1129
1130 - if (c3cn->state == C3CN_STATE_CLOSE)
1131 - s3_purge_write_queue(c3cn);
1132 + for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
1133 + int sgl_len = (3 * i) / 2 + (i & 1);
1134
1135 -out:
1136 - spin_unlock(&c3cn->lock);
1137 - local_bh_enable();
1138 - c3cn_put(c3cn);
1139 -}
1140 + sgl_len += 3;
1141 + skb_wrs[i] = (sgl_len <= wr_len
1142 + ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
1143 + }
1144
1145 -void cxgb3i_c3cn_release(struct s3_conn *c3cn)
1146 -{
1147 - c3cn_conn_debug("c3cn 0x%p, s 0x%x, f 0x%lx.\n",
1148 - c3cn, c3cn->state, c3cn->flags);
1149 - if (likely(c3cn->state != C3CN_STATE_SYN_SENT))
1150 - c3cn_close(c3cn);
1151 - else
1152 - c3cn_set_flag(c3cn, C3CN_CLOSE_NEEDED);
1153 - c3cn_put(c3cn);
1154 + wrlen = wr_len * 8;
1155 }
1156
1157 -
1158 -/*
1159 - * Local utility routines used to implement primary cxgb3 API operations.
1160 - * ======================================================================
1161 - */
1162 -
1163 -static u32 s3_send_rx_credits(struct s3_conn *, u32, u32, int);
1164 -static int act_open(struct s3_conn *, struct net_device *);
1165 -static void mk_act_open_req(struct s3_conn *, struct sk_buff *,
1166 - unsigned int, const struct l2t_entry *);
1167 -static void skb_entail(struct s3_conn *, struct sk_buff *, int);
1168 -
1169 static inline void reset_wr_list(struct s3_conn *c3cn)
1170 {
1171 c3cn->wr_pending_head = NULL;
1172 @@ -532,7 +499,7 @@ static inline void enqueue_wr(struct s3_
1173
1174 /*
1175 * We want to take an extra reference since both us and the driver
1176 - * need to free the packet before it's really freed. We know there's
1177 + * need to free the packet before it's really freed. We know there's
1178 * just one user currently so we use atomic_set rather than skb_get
1179 * to avoid the atomic op.
1180 */
1181 @@ -545,34 +512,37 @@ static inline void enqueue_wr(struct s3_
1182 c3cn->wr_pending_tail = skb;
1183 }
1184
1185 -/*
1186 - * The next two functions calculate the option 0 value for a connection.
1187 - */
1188 -static inline int compute_wscale(int win)
1189 +static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
1190 {
1191 - int wscale = 0;
1192 - while (wscale < 14 && (65535<<wscale) < win)
1193 - wscale++;
1194 - return wscale;
1195 + return c3cn->wr_pending_head;
1196 }
1197
1198 -static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
1199 +static inline void free_wr_skb(struct sk_buff *skb)
1200 {
1201 - int wscale = compute_wscale(cxgb3_rcv_win);
1202 - return V_KEEP_ALIVE(1) |
1203 - F_TCAM_BYPASS |
1204 - V_WND_SCALE(wscale) |
1205 - V_MSS_IDX(c3cn->mss_idx);
1206 + kfree_skb(skb);
1207 }
1208
1209 -static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
1210 +static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
1211 {
1212 - return V_ULP_MODE(ULP_MODE_ISCSI) |
1213 - V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
1214 + struct sk_buff *skb = c3cn->wr_pending_head;
1215 +
1216 + if (likely(skb)) {
1217 + /* Don't bother clearing the tail */
1218 + c3cn->wr_pending_head = (struct sk_buff *)skb->sp;
1219 + skb->sp = NULL;
1220 + }
1221 + return skb;
1222 +}
1223 +
1224 +static void purge_wr_queue(struct s3_conn *c3cn)
1225 +{
1226 + struct sk_buff *skb;
1227 + while ((skb = dequeue_wr(c3cn)) != NULL)
1228 + free_wr_skb(skb);
1229 }
1230
1231 -static inline void make_tx_data_wr(struct s3_conn *c3cn,
1232 - struct sk_buff *skb, int len)
1233 +static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
1234 + int len)
1235 {
1236 struct tx_data_wr *req;
1237
1238 @@ -591,250 +561,63 @@ static inline void make_tx_data_wr(struc
1239 if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
1240 req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
1241 V_TX_CPU_IDX(c3cn->qset));
1242 -
1243 - /* Sendbuffer is in units of 32KB.
1244 - */
1245 + /* Sendbuffer is in units of 32KB. */
1246 req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
1247 c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
1248 }
1249 }
1250
1251 /**
1252 - * cxgb3_egress_dev - return the cxgb3 egress device
1253 - * @root_dev: the root device anchoring the search
1254 - * @c3cn: the connection used to determine egress port in bonding mode
1255 - * @context: in bonding mode, indicates a connection set up or failover
1256 + * c3cn_push_tx_frames -- start transmit
1257 + * @c3cn: the offloaded connection
1258 + * @req_completion: request wr_ack or not
1259 *
1260 - * Return egress device or NULL if the egress device isn't one of our ports.
1261 - *
1262 - * Given a root network device it returns the physical egress device that is a
1263 - * descendant of the root device. The root device may be either a physical
1264 - * device, in which case it is the device returned, or a virtual device, such
1265 - * as a VLAN or bonding device. In case of a bonding device the search
1266 - * considers the decisions of the bonding device given its mode to locate the
1267 - * correct egress device.
1268 + * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
1269 + * connection's send queue and sends them on to T3. Must be called with the
1270 + * connection's lock held. Returns the amount of send buffer space that was
1271 + * freed as a result of sending queued data to T3.
1272 */
1273 -static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1274 - struct s3_conn *c3cn,
1275 - int context)
1276 -{
1277 - while (root_dev) {
1278 - if (root_dev->priv_flags & IFF_802_1Q_VLAN)
1279 - root_dev = vlan_dev_real_dev(root_dev);
1280 - else if (is_cxgb3_dev(root_dev))
1281 - return root_dev;
1282 - else
1283 - return NULL;
1284 - }
1285 - return NULL;
1286 -}
1287 -
1288 -static struct rtable *find_route(__be32 saddr, __be32 daddr,
1289 - __be16 sport, __be16 dport)
1290 +static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
1291 {
1292 - struct rtable *rt;
1293 - struct flowi fl = {
1294 - .oif = 0,
1295 - .nl_u = {
1296 - .ip4_u = {
1297 - .daddr = daddr,
1298 - .saddr = saddr,
1299 - .tos = 0 } },
1300 - .proto = IPPROTO_TCP,
1301 - .uli_u = {
1302 - .ports = {
1303 - .sport = sport,
1304 - .dport = dport } } };
1305 -
1306 - if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
1307 - return NULL;
1308 - return rt;
1309 + kfree_skb(skb);
1310 }
1311
1312 -int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
1313 +static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
1314 {
1315 - struct rtable *rt;
1316 - struct net_device *dev;
1317 - struct cxgb3i_sdev_data *cdata;
1318 + int total_size = 0;
1319 + struct sk_buff *skb;
1320 struct t3cdev *cdev;
1321 - __be32 sipv4;
1322 - int err;
1323 + struct cxgb3i_sdev_data *cdata;
1324
1325 - if (usin->sin_family != AF_INET)
1326 - return -EAFNOSUPPORT;
1327 + if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
1328 + c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
1329 + c3cn->state == C3CN_STATE_ABORTING)) {
1330 + c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
1331 + c3cn, c3cn->state);
1332 + return 0;
1333 + }
1334
1335 - /* get a source port if one hasn't been provided */
1336 - err = c3cn_get_port(c3cn);
1337 - if (err)
1338 - return err;
1339 + cdev = c3cn->cdev;
1340 + cdata = CXGB3_SDEV_DATA(cdev);
1341
1342 - c3cn_conn_debug("c3cn 0x%p get port %u.\n",
1343 - c3cn, ntohs(c3cn->saddr.sin_port));
1344 + while (c3cn->wr_avail
1345 + && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
1346 + int len = skb->len; /* length before skb_push */
1347 + int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
1348 + int wrs_needed = skb_wrs[frags];
1349
1350 - c3cn->daddr.sin_port = usin->sin_port;
1351 - c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1352 + if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
1353 + wrs_needed = 1;
1354
1355 - rt = find_route(c3cn->saddr.sin_addr.s_addr,
1356 - c3cn->daddr.sin_addr.s_addr,
1357 - c3cn->saddr.sin_port,
1358 - c3cn->daddr.sin_port);
1359 - if (rt == NULL) {
1360 - c3cn_conn_debug("NO route to 0x%x, port %u.\n",
1361 - c3cn->daddr.sin_addr.s_addr,
1362 - ntohs(c3cn->daddr.sin_port));
1363 - return -ENETUNREACH;
1364 - }
1365 + WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
1366
1367 - if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1368 - c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
1369 - c3cn->daddr.sin_addr.s_addr,
1370 - ntohs(c3cn->daddr.sin_port));
1371 - ip_rt_put(rt);
1372 - return -ENETUNREACH;
1373 - }
1374 -
1375 - if (!c3cn->saddr.sin_addr.s_addr)
1376 - c3cn->saddr.sin_addr.s_addr = rt->rt_src;
1377 -
1378 - /* now commit destination to connection */
1379 - c3cn->dst_cache = &rt->u.dst;
1380 -
1381 - /* try to establish an offloaded connection */
1382 - dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
1383 - if (dev == NULL) {
1384 - c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
1385 - return -ENETUNREACH;
1386 - }
1387 - cdata = NDEV2CDATA(dev);
1388 - cdev = cdata->cdev;
1389 -
1390 - sipv4 = cxgb3i_get_private_ipv4addr(dev);
1391 - if (!sipv4) {
1392 - c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
1393 - sipv4 = c3cn->saddr.sin_addr.s_addr;
1394 - cxgb3i_set_private_ipv4addr(dev, sipv4);
1395 - } else
1396 - c3cn->saddr.sin_addr.s_addr = sipv4;
1397 -
1398 - c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
1399 - c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
1400 - ntohs(c3cn->saddr.sin_port),
1401 - NIPQUAD(c3cn->daddr.sin_addr.s_addr),
1402 - ntohs(c3cn->daddr.sin_port));
1403 -
1404 - c3cn_set_state(c3cn, C3CN_STATE_SYN_SENT);
1405 -
1406 - if (!act_open(c3cn, dev))
1407 - return 0;
1408 -
1409 - /*
1410 - * If we get here, we don't have an offload connection so simply
1411 - * return a failure.
1412 - */
1413 - err = -ENOTSUPP;
1414 -
1415 - /*
1416 - * This trashes the connection and releases the local port,
1417 - * if necessary.
1418 - */
1419 - c3cn_conn_debug("c3cn 0x%p -> CLOSE.\n", c3cn);
1420 - c3cn_set_state(c3cn, C3CN_STATE_CLOSE);
1421 - ip_rt_put(rt);
1422 - c3cn_put_port(c3cn);
1423 - c3cn->daddr.sin_port = 0;
1424 - return err;
1425 -}
1426 -
1427 -/*
1428 - * Set of states for which we should return RX credits.
1429 - */
1430 -#define CREDIT_RETURN_STATE (C3CN_STATE_ESTABLISHED)
1431 -
1432 -/*
1433 - * Called after some received data has been read. It returns RX credits
1434 - * to the HW for the amount of data processed.
1435 - */
1436 -void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
1437 -{
1438 - struct t3cdev *cdev;
1439 - int must_send;
1440 - u32 credits, dack = 0;
1441 -
1442 - if (!c3cn_in_state(c3cn, CREDIT_RETURN_STATE))
1443 - return;
1444 -
1445 - credits = c3cn->copied_seq - c3cn->rcv_wup;
1446 - if (unlikely(!credits))
1447 - return;
1448 -
1449 - cdev = c3cn->cdev;
1450 -
1451 - if (unlikely(cxgb3_rx_credit_thres == 0))
1452 - return;
1453 -
1454 - dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
1455 -
1456 - /*
1457 - * For coalescing to work effectively ensure the receive window has
1458 - * at least 16KB left.
1459 - */
1460 - must_send = credits + 16384 >= cxgb3_rcv_win;
1461 -
1462 - if (must_send || credits >= cxgb3_rx_credit_thres)
1463 - c3cn->rcv_wup += s3_send_rx_credits(c3cn, credits, dack,
1464 - must_send);
1465 -}
1466 -
1467 -/*
1468 - * Generic ARP failure handler that discards the buffer.
1469 - */
1470 -static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
1471 -{
1472 - kfree_skb(skb);
1473 -}
1474 -
1475 -/*
1476 - * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
1477 - * connection's send queue and sends them on to T3. Must be called with the
1478 - * connection's lock held. Returns the amount of send buffer space that was
1479 - * freed as a result of sending queued data to T3.
1480 - */
1481 -static int s3_push_frames(struct s3_conn *c3cn, int req_completion)
1482 -{
1483 - int total_size = 0;
1484 - struct sk_buff *skb;
1485 - struct t3cdev *cdev;
1486 - struct cxgb3i_sdev_data *cdata;
1487 -
1488 - if (unlikely(c3cn_in_state(c3cn,
1489 - C3CN_STATE_SYN_SENT | C3CN_STATE_CLOSE)))
1490 - return 0;
1491 -
1492 - /*
1493 - * We shouldn't really be called at all after an abort but check just
1494 - * in case.
1495 - */
1496 - if (unlikely(c3cn_flag(c3cn, C3CN_ABORT_SHUTDOWN)))
1497 - return 0;
1498 -
1499 - cdev = c3cn->cdev;
1500 - cdata = CXGB3_SDEV_DATA(cdev);
1501 -
1502 - while (c3cn->wr_avail
1503 - && (skb = skb_peek(&c3cn->write_queue)) != NULL
1504 - && !c3cn_flag(c3cn, C3CN_TX_WAIT_IDLE)) {
1505 -
1506 - int len = skb->len; /* length before skb_push */
1507 - int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
1508 - int wrs_needed = skb_wrs[frags];
1509 -
1510 - if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
1511 - wrs_needed = 1;
1512 -
1513 - WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
1514 -
1515 - if (c3cn->wr_avail < wrs_needed)
1516 + if (c3cn->wr_avail < wrs_needed) {
1517 + c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
1518 + "wr %d < %u.\n",
1519 + c3cn, skb->len, skb->data_len, frags,
1520 + wrs_needed, c3cn->wr_avail);
1521 break;
1522 + }
1523
1524 __skb_unlink(skb, &c3cn->write_queue);
1525 skb->priority = CPL_PRIORITY_DATA;
1526 @@ -857,8 +640,7 @@ static int s3_push_frames(struct s3_conn
1527 c3cn->wr_unacked = 0;
1528 }
1529 CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR;
1530 - } else if (skb->data[0] == FW_WROPCODE_OFLD_CLOSE_CON)
1531 - c3cn_set_flag(c3cn, C3CN_CLOSE_CON_REQUESTED);
1532 + }
1533
1534 total_size += skb->truesize;
1535 set_arp_failure_handler(skb, arp_failure_discard);
1536 @@ -868,515 +650,386 @@ static int s3_push_frames(struct s3_conn
1537 }
1538
1539 /*
1540 - * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
1541 - * and send it along.
1542 + * process_cpl_msg: -> host
1543 + * Top-level CPL message processing used by most CPL messages that
1544 + * pertain to connections.
1545 */
1546 -static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
1547 +static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
1548 + struct sk_buff *),
1549 + struct s3_conn *c3cn,
1550 + struct sk_buff *skb)
1551 {
1552 - struct cpl_abort_req *req = cplhdr(skb);
1553 -
1554 - c3cn_conn_debug("tdev 0x%p.\n", cdev);
1555 -
1556 - req->cmd = CPL_ABORT_NO_RST;
1557 - cxgb3_ofld_send(cdev, skb);
1558 + spin_lock_bh(&c3cn->lock);
1559 + fn(c3cn, skb);
1560 + spin_unlock_bh(&c3cn->lock);
1561 }
1562
1563 /*
1564 - * Send an ABORT_REQ message. Cannot fail. This routine makes sure we do
1565 - * not send multiple ABORT_REQs for the same connection and also that we do
1566 - * not try to send a message after the connection has closed. Returns 1 if
1567 - * an ABORT_REQ wasn't generated after all, 0 otherwise.
1568 + * process_cpl_msg_ref: -> host
1569 + * Similar to process_cpl_msg() but takes an extra connection reference around
1570 + * the call to the handler. Should be used if the handler may drop a
1571 + * connection reference.
1572 */
1573 -static int s3_send_reset(struct s3_conn *c3cn, int mode,
1574 - struct sk_buff *skb)
1575 +static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
1576 + struct sk_buff *),
1577 + struct s3_conn *c3cn,
1578 + struct sk_buff *skb)
1579 {
1580 - struct cpl_abort_req *req;
1581 - unsigned int tid = c3cn->tid;
1582 + c3cn_hold(c3cn);
1583 + process_cpl_msg(fn, c3cn, skb);
1584 + c3cn_put(c3cn);
1585 +}
1586
1587 - if (unlikely(c3cn_flag(c3cn, C3CN_ABORT_SHUTDOWN) || !c3cn->cdev)) {
1588 - if (skb)
1589 - __kfree_skb(skb);
1590 - return 1;
1591 - }
1592 +/*
1593 + * Process a CPL_ACT_ESTABLISH message: -> host
1594 + * Updates connection state from an active establish CPL message. Runs with
1595 + * the connection lock held.
1596 + */
1597
1598 - c3cn_conn_debug("c3cn 0x%p, mode %d, flag ABORT_RPL + ABORT_SHUT.\n",
1599 - c3cn, mode);
1600 +static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
1601 +{
1602 + struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);
1603 + if (c3cn)
1604 + c3cn_put(c3cn);
1605 +}
1606
1607 - c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);
1608 - c3cn_set_flag(c3cn, C3CN_ABORT_SHUTDOWN);
1609 +static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
1610 + unsigned int opt)
1611 +{
1612 + c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
1613
1614 - /* Purge the send queue so we don't send anything after an abort. */
1615 - s3_purge_write_queue(c3cn);
1616 + c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;
1617
1618 - if (!skb)
1619 - skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
1620 - skb->priority = CPL_PRIORITY_DATA;
1621 - set_arp_failure_handler(skb, abort_arp_failure);
1622 + /*
1623 + * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
1624 + * pass through opt0.
1625 + */
1626 + if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
1627 + c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);
1628
1629 - req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
1630 - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
1631 - req->wr.wr_lo = htonl(V_WR_TID(tid));
1632 - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
1633 - req->rsvd0 = htonl(c3cn->snd_nxt);
1634 - req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
1635 - req->cmd = mode;
1636 + dst_confirm(c3cn->dst_cache);
1637
1638 - l2t_send(c3cn->cdev, skb, c3cn->l2t);
1639 - return 0;
1640 + smp_mb();
1641 +
1642 + c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
1643 }
1644
1645 -/*
1646 - * Add a list of skbs to a connection send queue. This interface is intended
1647 - * for use by in-kernel ULPs. The skbs must comply with the max size limit of
1648 - * the device and have a headroom of at least TX_HEADER_LEN bytes.
1649 - */
1650 -int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb, int flags)
1651 +static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
1652 {
1653 - struct sk_buff *next;
1654 - int err, copied = 0;
1655 -
1656 - spin_lock_bh(&c3cn->lock);
1657 + struct cpl_act_establish *req = cplhdr(skb);
1658 + u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */
1659
1660 - if (!c3cn_in_state(c3cn, C3CN_STATE_ESTABLISHED)) {
1661 - err = -EAGAIN;
1662 - goto out_err;
1663 - }
1664 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1665 + c3cn, c3cn->state, c3cn->flags);
1666
1667 - err = -EPIPE;
1668 - if (c3cn->err || (c3cn->shutdown & C3CN_SEND_SHUTDOWN))
1669 - goto out_err;
1670 + if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
1671 + cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
1672 + c3cn->tid, c3cn->state);
1673
1674 - while (skb) {
1675 - int frags = skb_shinfo(skb)->nr_frags +
1676 - (skb->len != skb->data_len);
1677 + c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
1678 + c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));
1679
1680 - if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
1681 - c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
1682 - err = -EINVAL;
1683 - goto out_err;
1684 - }
1685 + __kfree_skb(skb);
1686
1687 - if (frags >= SKB_WR_LIST_SIZE) {
1688 - cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
1689 - c3cn, skb_shinfo(skb)->nr_frags,
1690 - skb->len, skb->data_len);
1691 - err = -EINVAL;
1692 - goto out_err;
1693 - }
1694 + if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
1695 + /* upper layer has requested closing */
1696 + send_abort_req(c3cn);
1697 + else if (c3cn_push_tx_frames(c3cn, 1))
1698 + cxgb3i_conn_tx_open(c3cn);
1699 +}
1700
1701 - next = skb->next;
1702 - skb->next = NULL;
1703 - skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
1704 - copied += skb->len;
1705 - c3cn->write_seq += skb->len + ulp_extra_len(skb);
1706 - skb = next;
1707 - }
1708 -done:
1709 - if (likely(skb_queue_len(&c3cn->write_queue)))
1710 - s3_push_frames(c3cn, 1);
1711 - spin_unlock_bh(&c3cn->lock);
1712 - return copied;
1713 +static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
1714 + void *ctx)
1715 +{
1716 + struct cpl_act_establish *req = cplhdr(skb);
1717 + unsigned int tid = GET_TID(req);
1718 + unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
1719 + struct s3_conn *c3cn = ctx;
1720 + struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
1721
1722 -out_err:
1723 - if (copied == 0 && err == -EPIPE)
1724 - copied = c3cn->err ? c3cn->err : -EPIPE;
1725 - goto done;
1726 -}
1727 + c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
1728 + tid, c3cn, c3cn->state, c3cn->flags);
1729
1730 -/*
1731 - * Low-level utility routines for primary API functions.
1732 - * =====================================================
1733 - */
1734 -/* routines to implement CPL message processing */
1735 -static void c3cn_act_establish(struct s3_conn *, struct sk_buff *);
1736 -static void active_open_failed(struct s3_conn *, struct sk_buff *);
1737 -static void wr_ack(struct s3_conn *, struct sk_buff *);
1738 -static void do_peer_fin(struct s3_conn *, struct sk_buff *);
1739 -static void process_abort_req(struct s3_conn *, struct sk_buff *);
1740 -static void process_abort_rpl(struct s3_conn *, struct sk_buff *);
1741 -static void process_close_con_rpl(struct s3_conn *, struct sk_buff *);
1742 -static void process_rx_iscsi_hdr(struct s3_conn *, struct sk_buff *);
1743 + c3cn->tid = tid;
1744 + c3cn_hold(c3cn);
1745 + cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
1746 + s3_free_atid(cdev, atid);
1747
1748 -static struct sk_buff *__get_cpl_reply_skb(struct sk_buff *, size_t, gfp_t);
1749 + c3cn->qset = G_QNUM(ntohl(skb->csum));
1750
1751 -static void fail_act_open(struct s3_conn *, int);
1752 -static void init_offload_conn(struct s3_conn *, struct t3cdev *,
1753 - struct dst_entry *);
1754 + process_cpl_msg(process_act_establish, c3cn, skb);
1755 + return 0;
1756 +}
1757
1758 /*
1759 - * Insert a connection into the TID table and take an extra reference.
1760 + * Process a CPL_ACT_OPEN_RPL message: -> host
1761 + * Handle active open failures.
1762 */
1763 -static inline void c3cn_insert_tid(struct cxgb3i_sdev_data *cdata,
1764 - struct s3_conn *c3cn,
1765 - unsigned int tid)
1766 +static int act_open_rpl_status_to_errno(int status)
1767 {
1768 - c3cn_hold(c3cn);
1769 - cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
1770 -}
1771 -
1772 -static inline void free_atid(struct t3cdev *cdev, unsigned int tid)
1773 -{
1774 - struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);
1775 - if (c3cn)
1776 - c3cn_put(c3cn);
1777 -}
1778 -
1779 -/*
1780 - * This function is intended for allocations of small control messages.
1781 - * Such messages go as immediate data and usually the pakets are freed
1782 - * immediately. We maintain a cache of one small sk_buff and use it whenever
1783 - * it is available (has a user count of 1). Otherwise we get a fresh buffer.
1784 - */
1785 -#define CTRL_SKB_LEN 120
1786 -
1787 -static struct sk_buff *alloc_ctrl_skb(const struct s3_conn *c3cn,
1788 - int len)
1789 -{
1790 - struct sk_buff *skb = c3cn->ctrl_skb_cache;
1791 -
1792 - if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
1793 - __skb_trim(skb, 0);
1794 - atomic_set(&skb->users, 2);
1795 - } else if (likely(!in_atomic()))
1796 - skb = alloc_skb(len, GFP_ATOMIC | __GFP_NOFAIL);
1797 - else
1798 - skb = alloc_skb(len, GFP_ATOMIC);
1799 - return skb;
1800 + switch (status) {
1801 + case CPL_ERR_CONN_RESET:
1802 + return ECONNREFUSED;
1803 + case CPL_ERR_ARP_MISS:
1804 + return EHOSTUNREACH;
1805 + case CPL_ERR_CONN_TIMEDOUT:
1806 + return ETIMEDOUT;
1807 + case CPL_ERR_TCAM_FULL:
1808 + return ENOMEM;
1809 + case CPL_ERR_CONN_EXIST:
1810 + cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
1811 + return EADDRINUSE;
1812 + default:
1813 + return EIO;
1814 + }
1815 }
1816
1817 -/*
1818 - * Handle an ARP failure for an active open.
1819 - */
1820 -static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
1821 +static void act_open_retry_timer(unsigned long data)
1822 {
1823 - struct s3_conn *c3cn = (struct s3_conn *)skb->sk;
1824 + struct sk_buff *skb;
1825 + struct s3_conn *c3cn = (struct s3_conn *)data;
1826
1827 - c3cn_conn_debug("c3cn 0x%p, state 0x%x.\n", c3cn, c3cn->state);
1828 + c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
1829
1830 - c3cn_hold(c3cn);
1831 - spin_lock(&c3cn->lock);
1832 - if (c3cn->state == C3CN_STATE_SYN_SENT) {
1833 - fail_act_open(c3cn, EHOSTUNREACH);
1834 - __kfree_skb(skb);
1835 + spin_lock_bh(&c3cn->lock);
1836 + skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
1837 + if (!skb)
1838 + fail_act_open(c3cn, ENOMEM);
1839 + else {
1840 + skb->sk = (struct sock *)c3cn;
1841 + set_arp_failure_handler(skb, act_open_req_arp_failure);
1842 + make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
1843 + l2t_send(c3cn->cdev, skb, c3cn->l2t);
1844 }
1845 - spin_unlock(&c3cn->lock);
1846 + spin_unlock_bh(&c3cn->lock);
1847 c3cn_put(c3cn);
1848 }
1849
1850 -/*
1851 - * Send an active open request.
1852 - */
1853 -static int act_open(struct s3_conn *c3cn, struct net_device *dev)
1854 +static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
1855 {
1856 - struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
1857 - struct t3cdev *cdev = cdata->cdev;
1858 - struct dst_entry *dst = c3cn->dst_cache;
1859 - struct sk_buff *skb;
1860 + struct cpl_act_open_rpl *rpl = cplhdr(skb);
1861
1862 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
1863 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1864 c3cn, c3cn->state, c3cn->flags);
1865 - /*
1866 - * Initialize connection data. Note that the flags and ULP mode are
1867 - * initialized higher up ...
1868 - */
1869 - c3cn->dev = dev;
1870 - c3cn->cdev = cdev;
1871 - c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
1872 - if (c3cn->tid < 0)
1873 - goto out_err;
1874 -
1875 - c3cn->qset = 0;
1876 - c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
1877 - if (!c3cn->l2t)
1878 - goto free_tid;
1879
1880 - skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
1881 - if (!skb)
1882 - goto free_l2t;
1883 + if (rpl->status == CPL_ERR_CONN_EXIST &&
1884 + c3cn->retry_timer.function != act_open_retry_timer) {
1885 + c3cn->retry_timer.function = act_open_retry_timer;
1886 + if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
1887 + c3cn_hold(c3cn);
1888 + } else
1889 + fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
1890 + __kfree_skb(skb);
1891 +}
1892
1893 - skb->sk = (struct sock *)c3cn;
1894 - set_arp_failure_handler(skb, act_open_req_arp_failure);
1895 +static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1896 +{
1897 + struct s3_conn *c3cn = ctx;
1898 + struct cpl_act_open_rpl *rpl = cplhdr(skb);
1899
1900 - c3cn_hold(c3cn);
1901 + c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
1902 + rpl->status, c3cn, c3cn->state, c3cn->flags);
1903
1904 - init_offload_conn(c3cn, cdev, dst);
1905 - c3cn->err = 0;
1906 - c3cn_reset_flag(c3cn, C3CN_DONE);
1907 + if (rpl->status != CPL_ERR_TCAM_FULL &&
1908 + rpl->status != CPL_ERR_CONN_EXIST &&
1909 + rpl->status != CPL_ERR_ARP_MISS)
1910 + cxgb3_queue_tid_release(cdev, GET_TID(rpl));
1911
1912 - mk_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
1913 - l2t_send(cdev, skb, c3cn->l2t);
1914 + process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
1915 return 0;
1916 -
1917 -free_l2t:
1918 - l2t_release(L2DATA(cdev), c3cn->l2t);
1919 -free_tid:
1920 - free_atid(cdev, c3cn->tid);
1921 - c3cn->tid = 0;
1922 -out_err:
1923 - return -1;
1924 }
1925
1926 /*
1927 - * Close a connection by sending a CPL_CLOSE_CON_REQ message. Cannot fail
1928 - * under any circumstances. We take the easy way out and always queue the
1929 - * message to the write_queue. We can optimize the case where the queue is
1930 - * already empty though the optimization is probably not worth it.
1931 + * Process PEER_CLOSE CPL messages: -> host
1932 + * Handle peer FIN.
1933 */
1934 -static void mk_close_req(struct s3_conn *c3cn)
1935 +static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
1936 {
1937 - struct sk_buff *skb;
1938 - struct cpl_close_con_req *req;
1939 - unsigned int tid = c3cn->tid;
1940 -
1941 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
1942 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
1943 c3cn, c3cn->state, c3cn->flags);
1944
1945 - skb = alloc_skb(sizeof(struct cpl_close_con_req),
1946 - GFP_KERNEL | __GFP_NOFAIL);
1947 - req = (struct cpl_close_con_req *)__skb_put(skb, sizeof(*req));
1948 - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
1949 - req->wr.wr_lo = htonl(V_WR_TID(tid));
1950 - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
1951 - req->rsvd = htonl(c3cn->write_seq);
1952 + if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
1953 + goto out;
1954
1955 - skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
1956 - if (c3cn->state != C3CN_STATE_SYN_SENT)
1957 - s3_push_frames(c3cn, 1);
1958 + switch (c3cn->state) {
1959 + case C3CN_STATE_ESTABLISHED:
1960 + c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
1961 + break;
1962 + case C3CN_STATE_ACTIVE_CLOSE:
1963 + c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
1964 + break;
1965 + case C3CN_STATE_CLOSE_WAIT_1:
1966 + c3cn_closed(c3cn);
1967 + break;
1968 + case C3CN_STATE_ABORTING:
1969 + break;
1970 + default:
1971 + cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
1972 + c3cn->cdev->name, c3cn->tid, c3cn->state);
1973 + }
1974 +
1975 + cxgb3i_conn_closing(c3cn);
1976 +out:
1977 + __kfree_skb(skb);
1978 }
1979
1980 -static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
1981 - int flags)
1982 +static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
1983 {
1984 - CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
1985 - CXGB3_SKB_CB(skb)->flags = flags;
1986 - __skb_queue_tail(&c3cn->write_queue, skb);
1987 + struct s3_conn *c3cn = ctx;
1988 +
1989 + c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
1990 + c3cn, c3cn->state, c3cn->flags);
1991 + process_cpl_msg_ref(process_peer_close, c3cn, skb);
1992 + return 0;
1993 }
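
process_peer_close() above is one arm of a small close-handshake state machine: a peer FIN moves an established connection into passive close, completes a close that was already half done, or is simply absorbed while aborting. A compact sketch of just those transitions (the local enum mirrors the C3CN_STATE_* names but is not the driver's definition):

#include <stdio.h>

enum state {
	ESTABLISHED, ACTIVE_CLOSE, PASSIVE_CLOSE,
	CLOSE_WAIT_1, CLOSE_WAIT_2, ABORTING, CLOSED,
};

/* transition taken when the peer's FIN (PEER_CLOSE) arrives */
static enum state on_peer_fin(enum state s)
{
	switch (s) {
	case ESTABLISHED:
		return PASSIVE_CLOSE;	/* peer closed first */
	case ACTIVE_CLOSE:
		return CLOSE_WAIT_2;	/* both sides closed, FINs crossed */
	case CLOSE_WAIT_1:
		return CLOSED;		/* our close was acked, now their FIN */
	case ABORTING:
		return ABORTING;	/* abort in flight, the FIN is moot */
	default:
		return s;		/* bad state; the driver logs this */
	}
}

int main(void)
{
	printf("EST -> %d, ACT_CLOSE -> %d, WAIT_1 -> %d\n",
	       on_peer_fin(ESTABLISHED), on_peer_fin(ACTIVE_CLOSE),
	       on_peer_fin(CLOSE_WAIT_1));
	return 0;
}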
1994
1995 /*
1996 - * Send RX credits through an RX_DATA_ACK CPL message. If nofail is 0 we are
1997 - * permitted to return without sending the message in case we cannot allocate
1998 - * an sk_buff. Returns the number of credits sent.
1999 + * Process CLOSE_CONN_RPL CPL message: -> host
2000 + * Process a peer ACK to our FIN.
2001 */
2002 -static u32 s3_send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack,
2003 - int nofail)
2004 +static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
2005 {
2006 - struct sk_buff *skb;
2007 - struct cpl_rx_data_ack *req;
2008 + struct cpl_close_con_rpl *rpl = cplhdr(skb);
2009
2010 - skb = (nofail ? alloc_ctrl_skb(c3cn, sizeof(*req))
2011 - : alloc_skb(sizeof(*req), GFP_ATOMIC));
2012 - if (!skb)
2013 - return 0;
2014 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
2015 + c3cn, c3cn->state, c3cn->flags);
2016
2017 - req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
2018 - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2019 - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
2020 - req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
2021 - skb->priority = CPL_PRIORITY_ACK;
2022 - cxgb3_ofld_send(c3cn->cdev, skb);
2023 - return credits;
2024 -}
2025 + c3cn->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
2026
2027 -static void mk_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
2028 - unsigned int atid, const struct l2t_entry *e)
2029 -{
2030 - struct cpl_act_open_req *req;
2031 + if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
2032 + goto out;
2033
2034 - c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);
2035 + switch (c3cn->state) {
2036 + case C3CN_STATE_ACTIVE_CLOSE:
2037 + c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
2038 + break;
2039 + case C3CN_STATE_CLOSE_WAIT_1:
2040 + case C3CN_STATE_CLOSE_WAIT_2:
2041 + c3cn_closed(c3cn);
2042 + break;
2043 + case C3CN_STATE_ABORTING:
2044 + break;
2045 + default:
2046 + cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
2047 + c3cn->cdev->name, c3cn->tid, c3cn->state);
2048 + }
2049
2050 - skb->priority = CPL_PRIORITY_SETUP;
2051 - req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
2052 - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
2053 - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
2054 - req->local_port = c3cn->saddr.sin_port;
2055 - req->peer_port = c3cn->daddr.sin_port;
2056 - req->local_ip = c3cn->saddr.sin_addr.s_addr;
2057 - req->peer_ip = c3cn->daddr.sin_addr.s_addr;
2058 - req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
2059 - V_TX_CHANNEL(e->smt_idx));
2060 - req->opt0l = htonl(calc_opt0l(c3cn));
2061 - req->params = 0;
2062 +out:
2063 + kfree_skb(skb);
2064 }
2065
2066 -/*
2067 - * Definitions and declarations for CPL handler functions.
2068 - * =======================================================
2069 - */
2070 -
2071 -/*
2072 - * Similar to process_cpl_msg() but takes an extra connection reference around
2073 - * the call to the handler. Should be used if the handler may drop a
2074 - * connection reference.
2075 - */
2076 -static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
2077 - struct sk_buff *),
2078 - struct s3_conn *c3cn,
2079 - struct sk_buff *skb)
2080 +static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
2081 + void *ctx)
2082 {
2083 - c3cn_hold(c3cn);
2084 - process_cpl_msg(fn, c3cn, skb);
2085 - c3cn_put(c3cn);
2086 -}
2087 + struct s3_conn *c3cn = ctx;
2088
2089 -/*
2090 - * Return whether a failed active open has allocated a TID
2091 - */
2092 -static inline int act_open_has_tid(int status)
2093 -{
2094 - return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
2095 - status != CPL_ERR_ARP_MISS;
2096 + c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
2097 + c3cn, c3cn->state, c3cn->flags);
2098 +
2099 + process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
2100 + return 0;
2101 }
2102
2103 /*
2104 - * Returns true if a connection cannot accept new Rx data.
2105 + * Process ABORT_REQ_RSS CPL message: -> host
2106 + * Process abort requests. If we are waiting for an ABORT_RPL, we ignore this
2107 + * request except that we need to reply to it.
2108 */
2109 -static inline int c3cn_no_receive(const struct s3_conn *c3cn)
2110 +
2111 +static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
2112 + int *need_rst)
2113 {
2114 - return c3cn->shutdown & C3CN_RCV_SHUTDOWN;
2115 + switch (abort_reason) {
2116 + case CPL_ERR_BAD_SYN: /* fall through */
2117 + case CPL_ERR_CONN_RESET:
2118 + return c3cn->state > C3CN_STATE_ESTABLISHED ?
2119 + EPIPE : ECONNRESET;
2120 + case CPL_ERR_XMIT_TIMEDOUT:
2121 + case CPL_ERR_PERSIST_TIMEDOUT:
2122 + case CPL_ERR_FINWAIT2_TIMEDOUT:
2123 + case CPL_ERR_KEEPALIVE_TIMEDOUT:
2124 + return ETIMEDOUT;
2125 + default:
2126 + return EIO;
2127 + }
2128 }
2129
2130 -/*
2131 - * A helper function that aborts a connection and increments the given MIB
2132 - * counter. The supplied skb is used to generate the ABORT_REQ message if
2133 - * possible. Must be called with softirqs disabled.
2134 - */
2135 -static inline void abort_conn(struct s3_conn *c3cn,
2136 - struct sk_buff *skb)
2137 +static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
2138 {
2139 - struct sk_buff *abort_skb;
2140 + int rst_status = CPL_ABORT_NO_RST;
2141 + const struct cpl_abort_req_rss *req = cplhdr(skb);
2142
2143 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
2144 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
2145 c3cn, c3cn->state, c3cn->flags);
2146
2147 - abort_skb = __get_cpl_reply_skb(skb, sizeof(struct cpl_abort_req),
2148 - GFP_ATOMIC);
2149 - if (abort_skb)
2150 - s3_send_reset(c3cn, CPL_ABORT_SEND_RST, abort_skb);
2151 -}
2152 + if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
2153 + c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
2154 + c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
2155 + __kfree_skb(skb);
2156 + return;
2157 + }
2158
2159 -/*
2160 - * Returns whether an ABORT_REQ_RSS message is a negative advice.
2161 - */
2162 -static inline int is_neg_adv_abort(unsigned int status)
2163 -{
2164 - return status == CPL_ERR_RTX_NEG_ADVICE ||
2165 - status == CPL_ERR_PERSIST_NEG_ADVICE;
2166 -}
2167 + c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
2168 + send_abort_rpl(c3cn, rst_status);
2169
2170 -/*
2171 - * CPL handler functions.
2172 - * ======================
2173 - */
2174 + if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
2175 + c3cn->err =
2176 + abort_status_to_errno(c3cn, req->status, &rst_status);
2177 + c3cn_closed(c3cn);
2178 + }
2179 +}
2180
2181 -/*
2182 - * Process a CPL_ACT_ESTABLISH message.
2183 - */
2184 -static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
2185 - void *ctx)
2186 +static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
2187 {
2188 - struct cpl_act_establish *req = cplhdr(skb);
2189 - unsigned int tid = GET_TID(req);
2190 - unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
2191 + const struct cpl_abort_req_rss *req = cplhdr(skb);
2192 struct s3_conn *c3cn = ctx;
2193 - struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
2194
2195 - c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, 0x%x, 0x%lx.\n",
2196 - tid, c3cn, c3cn->state, c3cn->flags);
2197 - /*
2198 - * It's OK if the TID is currently in use, the owning connection may
2199 - * have backlogged its last CPL message(s). Just take it away.
2200 - */
2201 - c3cn->tid = tid;
2202 - c3cn_insert_tid(cdata, c3cn, tid);
2203 - free_atid(cdev, atid);
2204 + c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n",
2205 + c3cn, c3cn->state, c3cn->flags);
2206
2207 - c3cn->qset = G_QNUM(ntohl(skb->csum));
2208 + if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
2209 + req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
2210 + __kfree_skb(skb);
2211 + return 0;
2212 + }
2213
2214 - process_cpl_msg(c3cn_act_establish, c3cn, skb);
2215 + process_cpl_msg_ref(process_abort_req, c3cn, skb);
2216 return 0;
2217 }
2218
2219 /*
2220 - * Process an ACT_OPEN_RPL CPL message.
2221 + * Process ABORT_RPL_RSS CPL message: -> host
2222 + * Process abort replies. We only process these messages if we anticipate
2223 + * them, as the coordination between SW and HW in this area is somewhat lacking
2224 + * and sometimes we get ABORT_RPLs after we are done with the connection that
2225 + * originated the ABORT_REQ.
2226 */
2227 -static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
2228 +static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
2229 {
2230 - struct s3_conn *c3cn = ctx;
2231 - struct cpl_act_open_rpl *rpl = cplhdr(skb);
2232 -
2233 - c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, 0x%x, 0x%lx.\n",
2234 - rpl->status, c3cn, c3cn->state, c3cn->flags);
2235 -
2236 - if (act_open_has_tid(rpl->status))
2237 - cxgb3_queue_tid_release(cdev, GET_TID(rpl));
2238 -
2239 - process_cpl_msg_ref(active_open_failed, c3cn, skb);
2240 - return 0;
2241 -}
2242 -
2243 -/*
2244 - * Handler RX_ISCSI_HDR CPL messages.
2245 - */
2246 -static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
2247 -{
2248 - struct s3_conn *c3cn = ctx;
2249 - process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
2250 - return 0;
2251 -}
2252 -
2253 -/*
2254 - * Handler for TX_DATA_ACK CPL messages.
2255 - */
2256 -static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
2257 -{
2258 - struct s3_conn *c3cn = ctx;
2259 -
2260 - process_cpl_msg(wr_ack, c3cn, skb);
2261 - return 0;
2262 -}
2263 -
2264 -/*
2265 - * Handler for PEER_CLOSE CPL messages.
2266 - */
2267 -static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
2268 -{
2269 - struct s3_conn *c3cn = ctx;
2270 -
2271 - c3cn_conn_debug("rcv, c3cn 0x%p, 0x%x, 0x%lx.\n",
2272 - c3cn, c3cn->state, c3cn->flags);
2273 - process_cpl_msg_ref(do_peer_fin, c3cn, skb);
2274 - return 0;
2275 -}
2276 -
2277 -/*
2278 - * Handle an ABORT_REQ_RSS CPL message.
2279 - */
2280 -static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
2281 -{
2282 - const struct cpl_abort_req_rss *req = cplhdr(skb);
2283 - struct s3_conn *c3cn = ctx;
2284 -
2285 - c3cn_conn_debug("rcv, c3cn 0x%p, 0x%x, 0x%lx.\n",
2286 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
2287 c3cn, c3cn->state, c3cn->flags);
2288
2289 - if (is_neg_adv_abort(req->status)) {
2290 - __kfree_skb(skb);
2291 - return 0;
2292 + if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
2293 + if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
2294 + c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
2295 + else {
2296 + c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
2297 + c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
2298 + if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
2299 + cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
2300 + c3cn->cdev->name, c3cn->tid);
2301 + c3cn_closed(c3cn);
2302 + }
2303 }
2304 -
2305 - process_cpl_msg_ref(process_abort_req, c3cn, skb);
2306 - return 0;
2307 + __kfree_skb(skb);
2308 }
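
Note how process_abort_rpl() uses C3CN_ABORT_RPL_RCVD as a one-bit counter: while C3CN_ABORT_RPL_PENDING is set, the connection is only torn down on the second reply seen. A stripped-down sketch of that flag dance, with plain ints standing in for the driver's flag bitmap:

#include <stdio.h>

struct conn {
	int rpl_pending;	/* an ABORT_REQ was sent, replies expected */
	int rpl_rcvd;		/* first reply already seen */
};

/* returns 1 when the connection may finally be torn down */
static int on_abort_rpl(struct conn *c)
{
	if (!c->rpl_pending)
		return 0;		/* unanticipated reply, ignore */
	if (!c->rpl_rcvd) {
		c->rpl_rcvd = 1;	/* first of two, keep waiting */
		return 0;
	}
	c->rpl_rcvd = 0;		/* second reply: clear both and close */
	c->rpl_pending = 0;
	return 1;
}

int main(void)
{
	struct conn c = { .rpl_pending = 1, .rpl_rcvd = 0 };

	printf("first reply closes?  %d\n", on_abort_rpl(&c));	/* 0 */
	printf("second reply closes? %d\n", on_abort_rpl(&c));	/* 1 */
	return 0;
}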
2309
2310 -/*
2311 - * Handle an ABORT_RPL_RSS CPL message.
2312 - */
2313 static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
2314 {
2315 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
2316 struct s3_conn *c3cn = ctx;
2317
2318 - c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, 0x%x, 0x%lx.\n",
2319 + c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
2320 rpl->status, c3cn, c3cn ? c3cn->state : 0,
2321 c3cn ? c3cn->flags : 0UL);
2322
2323 @@ -1394,7 +1047,7 @@ static int do_abort_rpl(struct t3cdev *c
2324 * abort races with ABORT_REQ_RSS, the latter frees the connection
2325 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
2326 * but FW turns the ABORT_REQ into a regular one and so we get
2327 - * ABORT_RPL_RSS with status 0 and no connection. Only on T3A.
2328 + * ABORT_RPL_RSS with status 0 and no connection.
2329 */
2330 if (!c3cn)
2331 goto discard;
2332 @@ -1408,144 +1061,11 @@ discard:
2333 }
2334
2335 /*
2336 - * Handler for CLOSE_CON_RPL CPL messages.
2337 - */
2338 -static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
2339 - void *ctx)
2340 -{
2341 - struct s3_conn *c3cn = ctx;
2342 -
2343 - c3cn_conn_debug("rcv, c3cn 0x%p, 0x%x, 0x%lx.\n",
2344 - c3cn, c3cn->state, c3cn->flags);
2345 -
2346 - process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
2347 - return 0;
2348 -}
2349 -
2350 -/*
2351 - * Definitions and declarations for CPL message processing.
2352 - * ========================================================
2353 - */
2354 -
2355 -static void make_established(struct s3_conn *, u32, unsigned int);
2356 -static void act_open_retry_timer(unsigned long);
2357 -static void mk_act_open_req(struct s3_conn *, struct sk_buff *,
2358 - unsigned int, const struct l2t_entry *);
2359 -static int act_open_rpl_status_to_errno(int);
2360 -static void handle_excess_rx(struct s3_conn *, struct sk_buff *);
2361 -static int abort_status_to_errno(struct s3_conn *, int, int *);
2362 -static void send_abort_rpl(struct sk_buff *, struct t3cdev *, int);
2363 -static struct sk_buff *get_cpl_reply_skb(struct sk_buff *, size_t, gfp_t);
2364 -
2365 -/*
2366 - * Dequeue and return the first unacknowledged's WR on a connections's pending
2367 - * list.
2368 - */
2369 -static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
2370 -{
2371 - struct sk_buff *skb = c3cn->wr_pending_head;
2372 -
2373 - if (likely(skb)) {
2374 - /* Don't bother clearing the tail */
2375 - c3cn->wr_pending_head = (struct sk_buff *)skb->sp;
2376 - skb->sp = NULL;
2377 - }
2378 - return skb;
2379 -}
2380 -
2381 -/*
2382 - * Return the first pending WR without removing it from the list.
2383 - */
2384 -static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
2385 -{
2386 - return c3cn->wr_pending_head;
2387 -}
2388 -
2389 -static inline void free_wr_skb(struct sk_buff *skb)
2390 -{
2391 - kfree_skb(skb);
2392 -}
2393 -
2394 -static void purge_wr_queue(struct s3_conn *c3cn)
2395 -{
2396 - struct sk_buff *skb;
2397 - while ((skb = dequeue_wr(c3cn)) != NULL)
2398 - free_wr_skb(skb);
2399 -}
2400 -
2401 -static inline void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
2402 - int cmd)
2403 -{
2404 - struct cpl_abort_rpl *rpl = cplhdr(skb);
2405 -
2406 - rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
2407 - rpl->wr.wr_lo = htonl(V_WR_TID(tid));
2408 - OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
2409 - rpl->cmd = cmd;
2410 -}
2411 -
2412 -/*
2413 - * CPL message processing ...
2414 - * ==========================
2415 - */
2416 -
2417 -/*
2418 - * Updates connection state from an active establish CPL message. Runs with
2419 - * the connection lock held.
2420 - */
2421 -static void c3cn_act_establish(struct s3_conn *c3cn,
2422 - struct sk_buff *skb)
2423 -{
2424 - struct cpl_act_establish *req = cplhdr(skb);
2425 - u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */
2426 -
2427 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
2428 - c3cn, c3cn->state, c3cn->flags);
2429 -
2430 - if (unlikely(c3cn->state != C3CN_STATE_SYN_SENT))
2431 - printk(KERN_ERR "TID %u expected SYN_SENT, found %d\n",
2432 - c3cn->tid, c3cn->state);
2433 -
2434 - c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
2435 - make_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));
2436 -
2437 - if (unlikely(c3cn_flag(c3cn, C3CN_CLOSE_NEEDED))) {
2438 - /* upper layer has requested closing */
2439 - abort_conn(c3cn, skb);
2440 - return;
2441 - }
2442 -
2443 - __kfree_skb(skb);
2444 - if (s3_push_frames(c3cn, 1))
2445 - cxgb3i_conn_tx_open(c3cn);
2446 -}
2447 -
2448 -/*
2449 - * Handle active open failures.
2450 - */
2451 -static void active_open_failed(struct s3_conn *c3cn,
2452 - struct sk_buff *skb)
2453 -{
2454 - struct cpl_act_open_rpl *rpl = cplhdr(skb);
2455 -
2456 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
2457 - c3cn, c3cn->state, c3cn->flags);
2458 -
2459 - if (rpl->status == CPL_ERR_CONN_EXIST &&
2460 - c3cn->retry_timer.function != act_open_retry_timer) {
2461 - c3cn->retry_timer.function = act_open_retry_timer;
2462 - c3cn_reset_timer(c3cn, &c3cn->retry_timer,
2463 - jiffies + HZ / 2);
2464 - } else
2465 - fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
2466 - __kfree_skb(skb);
2467 -}
2468 -
2469 -/*
2470 - * Process received pdu for a connection.
2471 + * Process RX_ISCSI_HDR CPL message: -> host
2472 + * Handle received PDUs; the payload could be DDP'ed. If not, the payload
2473 + * follows after the BHS.
2474 */
2475 -static void process_rx_iscsi_hdr(struct s3_conn *c3cn,
2476 - struct sk_buff *skb)
2477 +static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
2478 {
2479 struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
2480 struct cpl_iscsi_hdr_norss data_cpl;
2481 @@ -1554,8 +1074,10 @@ static void process_rx_iscsi_hdr(struct
2482 unsigned int len;
2483 int err;
2484
2485 - if (unlikely(c3cn_no_receive(c3cn))) {
2486 - handle_excess_rx(c3cn, skb);
2487 + if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
2488 + if (c3cn->state != C3CN_STATE_ABORTING)
2489 + send_abort_req(c3cn);
2490 + __kfree_skb(skb);
2491 return;
2492 }
2493
2494 @@ -1568,8 +1090,9 @@ static void process_rx_iscsi_hdr(struct
2495 len = hdr_len = ntohs(hdr_cpl->len);
2496 /* msg coalesce is off or not enough data received */
2497 if (skb->len <= hdr_len) {
2498 - printk(KERN_ERR "%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
2499 - c3cn->cdev->name, c3cn->tid, skb->len, hdr_len);
2500 + cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
2501 + c3cn->cdev->name, c3cn->tid,
2502 + skb->len, hdr_len);
2503 goto abort_conn;
2504 }
2505
2506 @@ -1586,6 +1109,7 @@ static void process_rx_iscsi_hdr(struct
2507 -	c3cn_rx_debug("skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
2508 -		      skb, skb->len, skb_ulp_pdulen(skb), status);
2509
2510 + c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
2511 + skb, skb->len, skb_ulp_pdulen(skb), status);
2512 +
2513 if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
2514 skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
2515 if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
2516 @@ -1610,15 +1136,24 @@ static void process_rx_iscsi_hdr(struct
2517 return;
2518
2519 abort_conn:
2520 - s3_send_reset(c3cn, CPL_ABORT_SEND_RST, NULL);
2521 + send_abort_req(c3cn);
2522 __kfree_skb(skb);
2523 }
2524
2525 +static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
2526 +{
2527 + struct s3_conn *c3cn = ctx;
2528 +
2529 + process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
2530 + return 0;
2531 +}
2532 +
2533 /*
2534 + * Process TX_DATA_ACK CPL messages: -> host
2535 * Process an acknowledgment of WR completion. Advance snd_una and send the
2536 * next batch of work requests from the write queue.
2537 */
2538 -static void wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
2539 +static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
2540 {
2541 struct cpl_wr_ack *hdr = cplhdr(skb);
2542 unsigned int credits = ntohs(hdr->credits);
2543 @@ -1632,9 +1167,9 @@ static void wr_ack(struct s3_conn *c3cn,
2544 struct sk_buff *p = peek_wr(c3cn);
2545
2546 if (unlikely(!p)) {
2547 - printk(KERN_ERR "%u WR_ACK credits for TID %u with "
2548 - "nothing pending, state %u\n",
2549 - credits, c3cn->tid, c3cn->state);
2550 + cxgb3i_log_error("%u WR_ACK credits for TID %u with "
2551 + "nothing pending, state %u\n",
2552 + credits, c3cn->tid, c3cn->state);
2553 break;
2554 }
2555 if (unlikely(credits < p->csum)) {
2556 @@ -1653,186 +1188,262 @@ static void wr_ack(struct s3_conn *c3cn,
2557 if (c3cn->snd_una != snd_una) {
2558 c3cn->snd_una = snd_una;
2559 dst_confirm(c3cn->dst_cache);
2560 - if (c3cn->snd_una == c3cn->snd_nxt)
2561 - c3cn_reset_flag(c3cn, C3CN_TX_WAIT_IDLE);
2562 }
2563
2564 - if (skb_queue_len(&c3cn->write_queue) && s3_push_frames(c3cn, 0))
2565 + if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0))
2566 cxgb3i_conn_tx_open(c3cn);
2567 out_free:
2568 __kfree_skb(skb);
2569 }
2570
2571 +static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
2572 +{
2573 + struct s3_conn *c3cn = ctx;
2574 +
2575 + process_cpl_msg(process_wr_ack, c3cn, skb);
2576 + return 0;
2577 +}
2578 +
2579 /*
2580 - * Handle a peer FIN.
2581 + * For each connection, pre-allocate the skbs needed for close/abort
2582 + * requests, so that such a request can be serviced right away.
2583 */
2584 -static void do_peer_fin(struct s3_conn *c3cn, struct sk_buff *skb)
2585 +static void c3cn_free_cpl_skbs(struct s3_conn *c3cn)
2586 {
2587 - int keep = 0;
2588 -
2589 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
2590 - c3cn, c3cn->state, c3cn->flags);
2591 + if (c3cn->cpl_close)
2592 + kfree_skb(c3cn->cpl_close);
2593 + if (c3cn->cpl_abort_req)
2594 + kfree_skb(c3cn->cpl_abort_req);
2595 + if (c3cn->cpl_abort_rpl)
2596 + kfree_skb(c3cn->cpl_abort_rpl);
2597 +}
2598
2599 - if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
2600 - goto out;
2601 +static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn)
2602 +{
2603 + c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
2604 + GFP_KERNEL);
2605 + if (!c3cn->cpl_close)
2606 + return -ENOMEM;
2607 + skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req));
2608
2609 - c3cn->shutdown |= C3CN_RCV_SHUTDOWN;
2610 - c3cn_set_flag(c3cn, C3CN_DONE);
2611 + c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
2612 + GFP_KERNEL);
2613 + if (!c3cn->cpl_abort_req)
2614 + goto free_cpl_skbs;
2615 + skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req));
2616 +
2617 + c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
2618 + GFP_KERNEL);
2619 + if (!c3cn->cpl_abort_rpl)
2620 + goto free_cpl_skbs;
2621 + skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));
2622
2623 - switch (c3cn->state) {
2624 - case C3CN_STATE_ESTABLISHED:
2625 - break;
2626 - case C3CN_STATE_CLOSING:
2627 - c3cn_done(c3cn);
2628 - break;
2629 - default:
2630 - printk(KERN_ERR
2631 - "%s: TID %u received PEER_CLOSE in bad state %d\n",
2632 - c3cn->cdev->name, c3cn->tid, c3cn->state);
2633 - }
2634 + return 0;
2635
2636 - cxgb3i_conn_closing(c3cn);
2637 -out:
2638 - if (!keep)
2639 - __kfree_skb(skb);
2640 +free_cpl_skbs:
2641 + c3cn_free_cpl_skbs(c3cn);
2642 + return -ENOMEM;
2643 }
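
Pre-allocating the three control skbs at connection-create time means a later close or abort can never stall on the allocator, which matters because aborts tend to be issued exactly when the system is already under memory pressure. A user-space analogue of the reserve-then-consume pattern (message sizes and names are placeholders):

#include <stdio.h>
#include <stdlib.h>

struct conn {
	void *close_msg;	/* reserved up front, at a safe time */
	void *abort_msg;
};

static int conn_reserve_msgs(struct conn *c)
{
	c->close_msg = malloc(64);	/* placeholder message size */
	c->abort_msg = malloc(64);
	if (!c->close_msg || !c->abort_msg) {
		free(c->close_msg);
		free(c->abort_msg);
		return -1;		/* fail early, not at close time */
	}
	return 0;
}

/* at close time nothing is allocated: the reserve is consumed */
static void conn_close(struct conn *c)
{
	void *msg = c->close_msg;

	c->close_msg = NULL;
	printf("sending pre-built close message %p\n", msg);
	free(msg);	/* the driver hands the skb to the HW instead */
}

int main(void)
{
	struct conn c = { 0 };

	if (conn_reserve_msgs(&c))
		return 1;
	conn_close(&c);
	free(c.abort_msg);
	return 0;
}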
2644
2645 -/*
2646 - * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
2647 - * request except that we need to reply to it.
2648 +/**
2649 + * c3cn_release_offload_resources - release offload resources
2650 + * @c3cn: the offloaded iscsi tcp connection.
2651 + * Release resources held by an offload connection (TID, L2T entry, etc.)
2652 */
2653 -static void process_abort_req(struct s3_conn *c3cn,
2654 - struct sk_buff *skb)
2655 +static void c3cn_release_offload_resources(struct s3_conn *c3cn)
2656 {
2657 - int rst_status = CPL_ABORT_NO_RST;
2658 - const struct cpl_abort_req_rss *req = cplhdr(skb);
2659 -
2660 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
2661 - c3cn, c3cn->state, c3cn->flags);
2662 + struct t3cdev *cdev = c3cn->cdev;
2663 + unsigned int tid = c3cn->tid;
2664
2665 - if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
2666 - c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
2667 - c3cn_set_flag(c3cn, C3CN_ABORT_SHUTDOWN);
2668 - __kfree_skb(skb);
2669 + if (!cdev)
2670 return;
2671 - }
2672 - c3cn_reset_flag(c3cn, C3CN_ABORT_REQ_RCVD);
2673
2674 - /*
2675 - * Three cases to consider:
2676 - * a) We haven't sent an abort_req; close the connection.
2677 - * b) We have sent a post-close abort_req that will get to TP too late
2678 - * and will generate a CPL_ERR_ABORT_FAILED reply. The reply will
2679 - * be ignored and the connection should be closed now.
2680 - * c) We have sent a regular abort_req that will get to TP too late.
2681 - * That will generate an abort_rpl with status 0, wait for it.
2682 - */
2683 - send_abort_rpl(skb, c3cn->cdev, rst_status);
2684 + c3cn->qset = 0;
2685
2686 - if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
2687 - c3cn->err =
2688 - abort_status_to_errno(c3cn, req->status, &rst_status);
2689 + c3cn_free_cpl_skbs(c3cn);
2690
2691 - c3cn_done(c3cn);
2692 + if (c3cn->wr_avail != c3cn->wr_max) {
2693 + purge_wr_queue(c3cn);
2694 + reset_wr_list(c3cn);
2695 }
2696 -}
2697
2698 -/*
2699 - * Process abort replies. We only process these messages if we anticipate
2700 - * them as the coordination between SW and HW in this area is somewhat lacking
2701 - * and sometimes we get ABORT_RPLs after we are done with the connection that
2702 - * originated the ABORT_REQ.
2703 - */
2704 -static void process_abort_rpl(struct s3_conn *c3cn,
2705 - struct sk_buff *skb)
2706 -{
2707 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
2708 - c3cn, c3cn->state, c3cn->flags);
2709 + if (c3cn->l2t) {
2710 + l2t_release(L2DATA(cdev), c3cn->l2t);
2711 + c3cn->l2t = NULL;
2712 + }
2713
2714 - if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
2715 - if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
2716 - c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
2717 - else {
2718 - c3cn_reset_flag(c3cn, C3CN_ABORT_RPL_RCVD);
2719 - c3cn_reset_flag(c3cn, C3CN_ABORT_RPL_PENDING);
2720 - BUG_ON(c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD));
2721 - c3cn_done(c3cn);
2722 - }
2723 + if (c3cn->state == C3CN_STATE_CONNECTING) /* we have ATID */
2724 + s3_free_atid(cdev, tid);
2725 + else { /* we have TID */
2726 + cxgb3_remove_tid(cdev, (void *)c3cn, tid);
2727 + c3cn_put(c3cn);
2728 }
2729 - __kfree_skb(skb);
2730 +
2731 + c3cn->cdev = NULL;
2732 }
2733
2734 -/*
2735 - * Process a peer ACK to our FIN.
2736 +/**
2737 + * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure
2738 + * Returns the allocated s3_conn structure, or NULL on failure.
2739 */
2740 -static void process_close_con_rpl(struct s3_conn *c3cn,
2741 - struct sk_buff *skb)
2742 +struct s3_conn *cxgb3i_c3cn_create(void)
2743 {
2744 - struct cpl_close_con_rpl *rpl = cplhdr(skb);
2745 + struct s3_conn *c3cn;
2746
2747 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
2748 - c3cn, c3cn->state, c3cn->flags);
2749 + c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
2750 + if (!c3cn)
2751 + return NULL;
2752
2753 - c3cn->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
2754 +	/* pre-allocate the close/abort CPL skbs, so we don't need to wait
2755 +	   for memory when a close/abort is requested. */
2756 + if (c3cn_alloc_cpl_skbs(c3cn) < 0)
2757 + goto free_c3cn;
2758
2759 - if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
2760 - goto out;
2761 + c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);
2762
2763 - if (c3cn->state == C3CN_STATE_CLOSING) {
2764 - c3cn_done(c3cn);
2765 - } else
2766 - printk(KERN_ERR
2767 - "%s: TID %u received CLOSE_CON_RPL in bad state %d\n",
2768 - c3cn->cdev->name, c3cn->tid, c3cn->state);
2769 -out:
2770 - kfree_skb(skb);
2771 + c3cn->flags = 0;
2772 + spin_lock_init(&c3cn->lock);
2773 + atomic_set(&c3cn->refcnt, 1);
2774 + skb_queue_head_init(&c3cn->receive_queue);
2775 + skb_queue_head_init(&c3cn->write_queue);
2776 + setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
2777 + rwlock_init(&c3cn->callback_lock);
2778 +
2779 + return c3cn;
2780 +
2781 +free_c3cn:
2782 + kfree(c3cn);
2783 + return NULL;
2784 }
2785
2786 -/*
2787 - * Random utility functions for CPL message processing ...
2788 - * =======================================================
2789 - */
2790 +static void c3cn_active_close(struct s3_conn *c3cn)
2791 +{
2792 + int data_lost;
2793 + int close_req = 0;
2794 +
2795 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
2796 + c3cn, c3cn->state, c3cn->flags);
2797 +
2798 + dst_confirm(c3cn->dst_cache);
2799 +
2800 + c3cn_hold(c3cn);
2801 + spin_lock_bh(&c3cn->lock);
2802 +
2803 + data_lost = skb_queue_len(&c3cn->receive_queue);
2804 + __skb_queue_purge(&c3cn->receive_queue);
2805 +
2806 + switch (c3cn->state) {
2807 + case C3CN_STATE_CLOSED:
2808 + case C3CN_STATE_ACTIVE_CLOSE:
2809 + case C3CN_STATE_CLOSE_WAIT_1:
2810 + case C3CN_STATE_CLOSE_WAIT_2:
2811 + case C3CN_STATE_ABORTING:
2812 +		/* nothing needs to be done */
2813 + break;
2814 + case C3CN_STATE_CONNECTING:
2815 + /* defer until cpl_act_open_rpl or cpl_act_establish */
2816 + c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
2817 + break;
2818 + case C3CN_STATE_ESTABLISHED:
2819 + close_req = 1;
2820 + c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
2821 + break;
2822 + case C3CN_STATE_PASSIVE_CLOSE:
2823 + close_req = 1;
2824 + c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
2825 + break;
2826 + }
2827 +
2828 + if (close_req) {
2829 + if (data_lost)
2830 + /* Unread data was tossed, zap the connection. */
2831 + send_abort_req(c3cn);
2832 + else
2833 + send_close_req(c3cn);
2834 + }
2835 +
2836 + spin_unlock_bh(&c3cn->lock);
2837 + c3cn_put(c3cn);
2838 +}
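
c3cn_active_close() also decides how to close: if unread receive data had to be discarded, TCP semantics call for a reset rather than a graceful FIN, hence the abort path. The decision in isolation, as a sketch:

#include <stdio.h>

/* returns 1 for abort (RST), 0 for graceful close (FIN) */
static int close_method(int unread_bytes_tossed)
{
	/* discarding unread data means the peer must see a reset */
	return unread_bytes_tossed > 0;
}

int main(void)
{
	printf("clean queue -> %s\n", close_method(0) ? "abort" : "close");
	printf("data tossed -> %s\n", close_method(3) ? "abort" : "close");
	return 0;
}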
2839
2840 /**
2841 - * find_best_mtu - find the entry in the MTU table closest to an MTU
2842 - * @d: TOM state
2843 - * @mtu: the target MTU
2844 - *
2845 - * Returns the index of the value in the MTU table that is closest to but
2846 - * does not exceed the target MTU.
2847 + * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
2848 + * resources held
2849 + * @c3cn: the iscsi tcp connection
2850 */
2851 -static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
2852 +void cxgb3i_c3cn_release(struct s3_conn *c3cn)
2853 {
2854 - int i = 0;
2855 -
2856 - while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
2857 - ++i;
2858 - return i;
2859 + c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
2860 + c3cn, c3cn->state, c3cn->flags);
2861 + if (likely(c3cn->state != C3CN_STATE_CONNECTING))
2862 + c3cn_active_close(c3cn);
2863 + else
2864 + c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
2865 + c3cn_put(c3cn);
2866 }
2867
2868 -static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
2869 +static int is_cxgb3_dev(struct net_device *dev)
2870 {
2871 - unsigned int idx;
2872 - struct dst_entry *dst = c3cn->dst_cache;
2873 - struct t3cdev *cdev = c3cn->cdev;
2874 - const struct t3c_data *td = T3C_DATA(cdev);
2875 - u16 advmss = dst_metric(dst, RTAX_ADVMSS);
2876 + struct cxgb3i_sdev_data *cdata;
2877
2878 - if (advmss > pmtu - 40)
2879 - advmss = pmtu - 40;
2880 - if (advmss < td->mtus[0] - 40)
2881 - advmss = td->mtus[0] - 40;
2882 - idx = find_best_mtu(td, advmss + 40);
2883 - return idx;
2884 + write_lock(&cdata_rwlock);
2885 + list_for_each_entry(cdata, &cdata_list, list) {
2886 + struct adap_ports *ports = &cdata->ports;
2887 + int i;
2888 +
2889 + for (i = 0; i < ports->nports; i++)
2890 + if (dev == ports->lldevs[i]) {
2891 + write_unlock(&cdata_rwlock);
2892 + return 1;
2893 + }
2894 + }
2895 + write_unlock(&cdata_rwlock);
2896 + return 0;
2897 }
2898
2899 -static void fail_act_open(struct s3_conn *c3cn, int errno)
2900 +/**
2901 + * cxgb3_egress_dev - return the cxgb3 egress device
2902 + * @root_dev: the root device anchoring the search
2903 + * @c3cn: the connection used to determine egress port in bonding mode
2904 + * @context: in bonding mode, indicates a connection setup or failover
2905 + *
2906 + * Return egress device or NULL if the egress device isn't one of our ports.
2907 + */
2908 +static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
2909 + struct s3_conn *c3cn,
2910 + int context)
2911 {
2912 - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
2913 - c3cn, c3cn->state, c3cn->flags);
2914 + while (root_dev) {
2915 + if (root_dev->priv_flags & IFF_802_1Q_VLAN)
2916 + root_dev = vlan_dev_real_dev(root_dev);
2917 + else if (is_cxgb3_dev(root_dev))
2918 + return root_dev;
2919 + else
2920 + return NULL;
2921 + }
2922 + return NULL;
2923 +}
2924
2925 - c3cn->err = errno;
2926 - c3cn_done(c3cn);
2927 +static struct rtable *find_route(__be32 saddr, __be32 daddr,
2928 + __be16 sport, __be16 dport)
2929 +{
2930 + struct rtable *rt;
2931 + struct flowi fl = {
2932 + .oif = 0,
2933 + .nl_u = {
2934 + .ip4_u = {
2935 + .daddr = daddr,
2936 + .saddr = saddr,
2937 + .tos = 0 } },
2938 + .proto = IPPROTO_TCP,
2939 + .uli_u = {
2940 + .ports = {
2941 + .sport = sport,
2942 + .dport = dport } } };
2943 +
2944 + if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
2945 + return NULL;
2946 + return rt;
2947 }
2948
2949 /*
2950 @@ -1847,195 +1458,355 @@ static void init_offload_conn(struct s3_
2951 c3cn->wr_unacked = 0;
2952 c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
2953
2954 - c3cn->ctrl_skb_cache = alloc_skb(CTRL_SKB_LEN, gfp_any());
2955 reset_wr_list(c3cn);
2956 }
2957
2958 -static void act_open_retry_timer(unsigned long data)
2959 +static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
2960 {
2961 + struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
2962 + struct t3cdev *cdev = cdata->cdev;
2963 + struct dst_entry *dst = c3cn->dst_cache;
2964 struct sk_buff *skb;
2965 - struct s3_conn *c3cn = (struct s3_conn *)data;
2966
2967 - c3cn_conn_debug("c3cn 0x%p, state 0x%x.\n", c3cn, c3cn->state);
2968 + c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
2969 + c3cn, c3cn->state, c3cn->flags);
2970 + /*
2971 + * Initialize connection data. Note that the flags and ULP mode are
2972 + * initialized higher up ...
2973 + */
2974 + c3cn->dev = dev;
2975 + c3cn->cdev = cdev;
2976 + c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
2977 + if (c3cn->tid < 0)
2978 + goto out_err;
2979
2980 - spin_lock(&c3cn->lock);
2981 - skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
2982 + c3cn->qset = 0;
2983 + c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
2984 + if (!c3cn->l2t)
2985 + goto free_tid;
2986 +
2987 + skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
2988 if (!skb)
2989 - fail_act_open(c3cn, ENOMEM);
2990 - else {
2991 - skb->sk = (struct sock *)c3cn;
2992 - set_arp_failure_handler(skb, act_open_req_arp_failure);
2993 - mk_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
2994 - l2t_send(c3cn->cdev, skb, c3cn->l2t);
2995 - }
2996 - spin_unlock(&c3cn->lock);
2997 - c3cn_put(c3cn);
2998 -}
2999 + goto free_l2t;
3000
3001 -/*
3002 - * Convert an ACT_OPEN_RPL status to a Linux errno.
3003 - */
3004 -static int act_open_rpl_status_to_errno(int status)
3005 -{
3006 - switch (status) {
3007 - case CPL_ERR_CONN_RESET:
3008 - return ECONNREFUSED;
3009 - case CPL_ERR_ARP_MISS:
3010 - return EHOSTUNREACH;
3011 - case CPL_ERR_CONN_TIMEDOUT:
3012 - return ETIMEDOUT;
3013 - case CPL_ERR_TCAM_FULL:
3014 - return ENOMEM;
3015 - case CPL_ERR_CONN_EXIST:
3016 - printk(KERN_ERR "ACTIVE_OPEN_RPL: 4-tuple in use\n");
3017 - return EADDRINUSE;
3018 - default:
3019 - return EIO;
3020 - }
3021 + skb->sk = (struct sock *)c3cn;
3022 + set_arp_failure_handler(skb, act_open_req_arp_failure);
3023 +
3024 + c3cn_hold(c3cn);
3025 +
3026 + init_offload_conn(c3cn, cdev, dst);
3027 + c3cn->err = 0;
3028 +
3029 + make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
3030 + l2t_send(cdev, skb, c3cn->l2t);
3031 + return 0;
3032 +
3033 +free_l2t:
3034 + l2t_release(L2DATA(cdev), c3cn->l2t);
3035 +free_tid:
3036 + s3_free_atid(cdev, c3cn->tid);
3037 + c3cn->tid = 0;
3038 +out_err:
3039 + return -1;
3040 }
3041
3042 -/*
3043 - * Convert the status code of an ABORT_REQ into a Linux error code. Also
3044 - * indicate whether RST should be sent in response.
3045 +
3046 +/**
3047 + * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
3048 + * @c3cn: the iscsi tcp connection
3049 + * @usin: destination address
3050 + *
3051 + * Return 0 if the active open request is sent, < 0 otherwise.
3052 */
3053 -static int abort_status_to_errno(struct s3_conn *c3cn,
3054 - int abort_reason, int *need_rst)
3055 +int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
3056 {
3057 - switch (abort_reason) {
3058 - case CPL_ERR_BAD_SYN: /* fall through */
3059 - case CPL_ERR_CONN_RESET:
3060 - return c3cn->state == C3CN_STATE_CLOSING ? EPIPE : ECONNRESET;
3061 - case CPL_ERR_XMIT_TIMEDOUT:
3062 - case CPL_ERR_PERSIST_TIMEDOUT:
3063 - case CPL_ERR_FINWAIT2_TIMEDOUT:
3064 - case CPL_ERR_KEEPALIVE_TIMEDOUT:
3065 - return ETIMEDOUT;
3066 - default:
3067 - return EIO;
3068 + struct rtable *rt;
3069 + struct net_device *dev;
3070 + struct cxgb3i_sdev_data *cdata;
3071 + struct t3cdev *cdev;
3072 + __be32 sipv4;
3073 + int err;
3074 +
3075 + if (usin->sin_family != AF_INET)
3076 + return -EAFNOSUPPORT;
3077 +
3078 + c3cn->daddr.sin_port = usin->sin_port;
3079 + c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
3080 +
3081 + rt = find_route(c3cn->saddr.sin_addr.s_addr,
3082 + c3cn->daddr.sin_addr.s_addr,
3083 + c3cn->saddr.sin_port,
3084 + c3cn->daddr.sin_port);
3085 + if (rt == NULL) {
3086 + c3cn_conn_debug("NO route to 0x%x, port %u.\n",
3087 + c3cn->daddr.sin_addr.s_addr,
3088 + ntohs(c3cn->daddr.sin_port));
3089 + return -ENETUNREACH;
3090 }
3091 -}
3092
3093 -static void send_abort_rpl(struct sk_buff *skb, struct t3cdev *cdev,
3094 - int rst_status)
3095 -{
3096 - struct sk_buff *reply_skb;
3097 - struct cpl_abort_req_rss *req = cplhdr(skb);
3098 + if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
3099 + c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
3100 + c3cn->daddr.sin_addr.s_addr,
3101 + ntohs(c3cn->daddr.sin_port));
3102 + ip_rt_put(rt);
3103 + return -ENETUNREACH;
3104 + }
3105
3106 - reply_skb = get_cpl_reply_skb(skb, sizeof(struct cpl_abort_rpl),
3107 - gfp_any());
3108 + if (!c3cn->saddr.sin_addr.s_addr)
3109 + c3cn->saddr.sin_addr.s_addr = rt->rt_src;
3110
3111 - reply_skb->priority = CPL_PRIORITY_DATA;
3112 - set_abort_rpl_wr(reply_skb, GET_TID(req), rst_status);
3113 - kfree_skb(skb);
3114 - cxgb3_ofld_send(cdev, reply_skb);
3115 -}
3116 + /* now commit destination to connection */
3117 + c3cn->dst_cache = &rt->u.dst;
3118
3119 -/*
3120 - * Returns an sk_buff for a reply CPL message of size len. If the input
3121 - * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
3122 - * is allocated. The input skb must be of size at least len. Note that this
3123 - * operation does not destroy the original skb data even if it decides to reuse
3124 - * the buffer.
3125 - */
3126 -static struct sk_buff *get_cpl_reply_skb(struct sk_buff *skb, size_t len,
3127 - gfp_t gfp)
3128 -{
3129 - if (likely(!skb_cloned(skb))) {
3130 - BUG_ON(skb->len < len);
3131 - __skb_trim(skb, len);
3132 - skb_get(skb);
3133 - } else {
3134 - skb = alloc_skb(len, gfp);
3135 - if (skb)
3136 - __skb_put(skb, len);
3137 + /* try to establish an offloaded connection */
3138 + dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
3139 + if (dev == NULL) {
3140 + c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
3141 + return -ENETUNREACH;
3142 }
3143 - return skb;
3144 + cdata = NDEV2CDATA(dev);
3145 + cdev = cdata->cdev;
3146 +
3147 + /* get a source port if one hasn't been provided */
3148 + err = c3cn_get_port(c3cn, cdata);
3149 + if (err)
3150 + return err;
3151 +
3152 + c3cn_conn_debug("c3cn 0x%p get port %u.\n",
3153 + c3cn, ntohs(c3cn->saddr.sin_port));
3154 +
3155 + sipv4 = cxgb3i_get_private_ipv4addr(dev);
3156 + if (!sipv4) {
3157 + c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
3158 + sipv4 = c3cn->saddr.sin_addr.s_addr;
3159 + cxgb3i_set_private_ipv4addr(dev, sipv4);
3160 + } else
3161 + c3cn->saddr.sin_addr.s_addr = sipv4;
3162 +
3163 + c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
3164 + c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
3165 + ntohs(c3cn->saddr.sin_port),
3166 + NIPQUAD(c3cn->daddr.sin_addr.s_addr),
3167 + ntohs(c3cn->daddr.sin_port));
3168 +
3169 + c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
3170 + if (!initiate_act_open(c3cn, dev))
3171 + return 0;
3172 +
3173 + /*
3174 +	 * If we get here, we don't have an offload connection, so simply
3175 + * return a failure.
3176 + */
3177 + err = -ENOTSUPP;
3178 +
3179 + /*
3180 + * This trashes the connection and releases the local port,
3181 + * if necessary.
3182 + */
3183 + c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
3184 + c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
3185 + ip_rt_put(rt);
3186 + c3cn_put_port(c3cn);
3187 + c3cn->daddr.sin_port = 0;
3188 + return err;
3189 }
3190
3191 -/*
3192 - * Release resources held by an offload connection (TID, L2T entry, etc.)
3193 +/**
3194 + * cxgb3i_c3cn_rx_credits - ack received tcp data.
3195 + * @c3cn: iscsi tcp connection
3196 + * @copied: # of bytes processed
3197 + *
3198 + * Called after some received data has been read. It returns RX credits
3199 + * to the HW for the amount of data processed.
3200 */
3201 -static void t3_release_offload_resources(struct s3_conn *c3cn)
3202 +void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
3203 {
3204 - struct t3cdev *cdev = c3cn->cdev;
3205 - unsigned int tid = c3cn->tid;
3206 + struct t3cdev *cdev;
3207 + int must_send;
3208 + u32 credits, dack = 0;
3209
3210 - if (!cdev)
3211 + if (c3cn->state != C3CN_STATE_ESTABLISHED)
3212 return;
3213
3214 - c3cn->qset = 0;
3215 + credits = c3cn->copied_seq - c3cn->rcv_wup;
3216 + if (unlikely(!credits))
3217 + return;
3218
3219 - kfree_skb(c3cn->ctrl_skb_cache);
3220 - c3cn->ctrl_skb_cache = NULL;
3221 + cdev = c3cn->cdev;
3222
3223 - if (c3cn->wr_avail != c3cn->wr_max) {
3224 - purge_wr_queue(c3cn);
3225 - reset_wr_list(c3cn);
3226 + if (unlikely(cxgb3_rx_credit_thres == 0))
3227 + return;
3228 +
3229 + dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
3230 +
3231 + /*
3232 + * For coalescing to work effectively ensure the receive window has
3233 + * at least 16KB left.
3234 + */
3235 + must_send = credits + 16384 >= cxgb3_rcv_win;
3236 +
3237 + if (must_send || credits >= cxgb3_rx_credit_thres)
3238 + c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
3239 +}
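
The credit-return policy above batches window updates: credits go back to the hardware either once they cross cxgb3_rx_credit_thres or once the remaining window shrinks to within 16KB, which keeps receive coalescing effective. The same arithmetic in a runnable sketch, with assumed values for the window and the threshold:

#include <stdio.h>

int main(void)
{
	unsigned int rcv_win = 256 * 1024;	/* assumed window size */
	unsigned int thres = 10 * 1024;		/* assumed credit threshold */
	unsigned int copied_seq = 250 * 1024;	/* bytes consumed by iscsi */
	unsigned int rcv_wup = 0;		/* bytes already credited */

	unsigned int credits = copied_seq - rcv_wup;
	/* window nearly exhausted: fewer than 16KB would remain */
	int must_send = credits + 16384 >= rcv_win;

	if (must_send || credits >= thres) {
		printf("returning %u bytes of credit\n", credits);
		rcv_wup += credits;	/* mirrors rcv_wup += send_rx_credits() */
	}
	return 0;
}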
3240 +
3241 +/**
3242 + * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
3243 + * @c3cn: iscsi tcp connection
3244 + * @skb: skb contains the iscsi pdu
3245 + *
3246 + * Add a list of skbs to a connection send queue. The skbs must comply with
3247 + * the max size limit of the device and have a headroom of at least
3248 + * TX_HEADER_LEN bytes.
3249 + * Return # of bytes queued.
3250 + */
3251 +int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
3252 +{
3253 + struct sk_buff *next;
3254 + int err, copied = 0;
3255 +
3256 + spin_lock_bh(&c3cn->lock);
3257 +
3258 + if (c3cn->state != C3CN_STATE_ESTABLISHED) {
3259 + c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
3260 + c3cn, c3cn->state);
3261 + err = -EAGAIN;
3262 + goto out_err;
3263 }
3264
3265 - if (c3cn->l2t) {
3266 - l2t_release(L2DATA(cdev), c3cn->l2t);
3267 - c3cn->l2t = NULL;
3268 + err = -EPIPE;
3269 + if (c3cn->err) {
3270 + c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
3271 + goto out_err;
3272 }
3273
3274 - if (c3cn->state == C3CN_STATE_SYN_SENT) /* we have ATID */
3275 - free_atid(cdev, tid);
3276 - else { /* we have TID */
3277 - cxgb3_remove_tid(cdev, (void *)c3cn, tid);
3278 - c3cn_put(c3cn);
3279 + while (skb) {
3280 + int frags = skb_shinfo(skb)->nr_frags +
3281 + (skb->len != skb->data_len);
3282 +
3283 + if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
3284 + c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
3285 + err = -EINVAL;
3286 + goto out_err;
3287 + }
3288 +
3289 + if (frags >= SKB_WR_LIST_SIZE) {
3290 + cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
3291 + c3cn, skb_shinfo(skb)->nr_frags,
3292 + skb->len, skb->data_len);
3293 + err = -EINVAL;
3294 + goto out_err;
3295 + }
3296 +
3297 + next = skb->next;
3298 + skb->next = NULL;
3299 + skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
3300 + copied += skb->len;
3301 + c3cn->write_seq += skb->len + ulp_extra_len(skb);
3302 + skb = next;
3303 }
3304 +done:
3305 + if (likely(skb_queue_len(&c3cn->write_queue)))
3306 + c3cn_push_tx_frames(c3cn, 1);
3307 + spin_unlock_bh(&c3cn->lock);
3308 + return copied;
3309
3310 - c3cn->cdev = NULL;
3311 +out_err:
3312 + if (copied == 0 && err == -EPIPE)
3313 + copied = c3cn->err ? c3cn->err : -EPIPE;
3314 + goto done;
3315 }
3316
3317 -/*
3318 - * Handles Rx data that arrives in a state where the connection isn't
3319 - * accepting new data.
3320 - */
3321 -static void handle_excess_rx(struct s3_conn *c3cn, struct sk_buff *skb)
3322 +static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
3323 {
3324 - if (!c3cn_flag(c3cn, C3CN_ABORT_SHUTDOWN))
3325 - abort_conn(c3cn, skb);
3326 + struct adap_ports *ports = &cdata->ports;
3327 + int i;
3328
3329 - kfree_skb(skb);
3330 + for (i = 0; i < ports->nports; i++)
3331 + NDEV2CDATA(ports->lldevs[i]) = NULL;
3332 + cxgb3i_free_big_mem(cdata);
3333 }
3334
3335 -/*
3336 - * Like get_cpl_reply_skb() but the returned buffer starts out empty.
3337 - */
3338 -static struct sk_buff *__get_cpl_reply_skb(struct sk_buff *skb, size_t len,
3339 - gfp_t gfp)
3340 +void cxgb3i_sdev_cleanup(void)
3341 {
3342 - if (likely(!skb_cloned(skb) && !skb->data_len)) {
3343 - __skb_trim(skb, 0);
3344 - skb_get(skb);
3345 - } else
3346 - skb = alloc_skb(len, gfp);
3347 - return skb;
3348 + struct cxgb3i_sdev_data *cdata;
3349 +
3350 + write_lock(&cdata_rwlock);
3351 + list_for_each_entry(cdata, &cdata_list, list) {
3352 + list_del(&cdata->list);
3353 + sdev_data_cleanup(cdata);
3354 + }
3355 + write_unlock(&cdata_rwlock);
3356 }
3357
3358 -/*
3359 - * Completes some final bits of initialization for just established connections
3360 - * and changes their state to C3CN_STATE_ESTABLISHED.
3361 - *
3362 - * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
3363 +int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
3364 +{
3365 + cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
3366 + cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
3367 + cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
3368 + cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
3369 + cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
3370 + cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
3371 + cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
3372 + cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;
3373 +
3374 + if (cxgb3_max_connect > CXGB3I_MAX_CONN)
3375 + cxgb3_max_connect = CXGB3I_MAX_CONN;
3376 + return 0;
3377 +}
3378 +
3379 +/**
3380 + * cxgb3i_sdev_add - allocate and initialize resources for each adapter found
3381 + * @cdev: t3cdev adapter
3382 + * @client: cxgb3 driver client
3383 */
3384 -static void make_established(struct s3_conn *c3cn, u32 snd_isn,
3385 - unsigned int opt)
3386 +void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
3387 {
3388 - c3cn_conn_debug("c3cn 0x%p, state 0x%x.\n", c3cn, c3cn->state);
3389 + struct cxgb3i_sdev_data *cdata;
3390 + struct ofld_page_info rx_page_info;
3391 + unsigned int wr_len;
3392 + int mapsize = DIV_ROUND_UP(cxgb3_max_connect,
3393 + 8 * sizeof(unsigned long));
3394 + int i;
3395
3396 - c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;
3397 + cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
3398 + if (!cdata)
3399 + return;
3400
3401 - /*
3402 - * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
3403 - * pass through opt0.
3404 - */
3405 - if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
3406 - c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);
3407 + if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
3408 + cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
3409 + cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0)
3410 + goto free_cdata;
3411
3412 - dst_confirm(c3cn->dst_cache);
3413 + s3_init_wr_tab(wr_len);
3414
3415 - smp_mb();
3416 - c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
3417 + INIT_LIST_HEAD(&cdata->list);
3418 + cdata->cdev = cdev;
3419 + cdata->client = client;
3420 +
3421 + for (i = 0; i < cdata->ports.nports; i++)
3422 + NDEV2CDATA(cdata->ports.lldevs[i]) = cdata;
3423 +
3424 + write_lock(&cdata_rwlock);
3425 + list_add_tail(&cdata->list, &cdata_list);
3426 + write_unlock(&cdata_rwlock);
3427 +
3428 + return;
3429 +
3430 +free_cdata:
3431 + cxgb3i_free_big_mem(cdata);
3432 +}
3433 +
3434 +/**
3435 + * cxgb3i_sdev_remove - free the allocated resources for the adapter
3436 + * @cdev: t3cdev adapter
3437 + */
3438 +void cxgb3i_sdev_remove(struct t3cdev *cdev)
3439 +{
3440 + struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
3441 +
3442 + write_lock(&cdata_rwlock);
3443 + list_del(&cdata->list);
3444 + write_unlock(&cdata_rwlock);
3445 +
3446 + sdev_data_cleanup(cdata);
3447 }
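
For reference, a minimal sketch of how a caller hands a pdu to the reworked
cxgb3i_c3cn_send_pdus() interface. The helper name and buffer handling below
are illustrative only and not part of this patch; the point is that each skb
must carry TX_HEADER_LEN bytes of headroom for the tx_data_wr and SGE opaque
header prepended later, or the call fails with -EINVAL.

/* illustrative sketch, not part of this patch */
static int example_queue_pdu(struct s3_conn *c3cn, const void *pdu,
			     unsigned int len)
{
	struct sk_buff *skb = alloc_skb(TX_HEADER_LEN + len, GFP_KERNEL);
	int err;

	if (!skb)
		return -ENOMEM;
	/* reserve room for the tx_data_wr + sge_opaque_hdr prepended later */
	skb_reserve(skb, TX_HEADER_LEN);
	memcpy(skb_put(skb, len), pdu, len);
	skb->next = NULL;		/* single-entry skb list */
	err = cxgb3i_c3cn_send_pdus(c3cn, skb);
	if (err < 0)
		kfree_skb(skb);		/* not queued, drop it here */
	return err;
}
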
3448 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
3449 +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
3450 @@ -1,12 +1,15 @@
3451 /*
3452 - * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
3453 + * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management
3454 *
3455 - * Written by Dimitris Michailidis (dm@chelsio.com)
3456 + * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
3457 *
3458 * This program is distributed in the hope that it will be useful, but WITHOUT
3459 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3460 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
3461 * release for licensing terms and conditions.
3462 + *
3463 + * Written by: Dimitris Michailidis (dm@chelsio.com)
3464 + * Karen Xie (kxie@chelsio.com)
3465 */
3466
3467 #ifndef _CXGB3I_OFFLOAD_H
3468 @@ -23,83 +26,104 @@
3469 #define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt)
3470 #define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt)
3471 #define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt)
3472 -
3473 -#ifdef __DEBUG_CXGB3I__
3474 #define cxgb3i_log_debug(fmt, args...) \
3475 printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args)
3476 -#else
3477 -#define cxgb3i_log_debug(fmt...)
3478 -#endif
3479 -
3480 -#ifdef __DEBUG_C3CN_CONN__
3481 -#define c3cn_conn_debug cxgb3i_log_debug
3482 -#else
3483 -#define c3cn_conn_debug(fmt...)
3484 -#endif
3485
3486 -/*
3487 - * Data structure to keep track of cxgb3 connection.
3488 +/**
3489 + * struct s3_conn - an iscsi tcp connection structure
3490 + *
3491 + * @dev: net device associated with the connection
3492 + * @cdev: adapter t3cdev for net device
3493 + * @flags: see c3cn_flags below
3494 + * @tid: connection id assigned by the h/w
3495 + * @qset: queue set used by connection
3496 + * @mss_idx: Maximum Segment Size table index
3497 + * @l2t: ARP resolution entry for offload packets
3498 + * @wr_max: maximum in-flight writes
3499 + * @wr_avail: number of writes available
3500 + * @wr_unacked: writes since last request for completion notification
3501 + * @wr_pending_head: head of pending write queue
3502 + * @wr_pending_tail: tail of pending write queue
3503 + * @cpl_close: skb for cpl_close_req
3504 + * @cpl_abort_req: skb for cpl_abort_req
3505 + * @cpl_abort_rpl: skb for cpl_abort_rpl
3506 + * @lock: connection status lock
3507 + * @refcnt: reference count on connection
3508 + * @state: connection state
3509 + * @saddr: source IP/port address
3510 + * @daddr: destination IP/port address
3511 + * @dst_cache: reference to destination route
3512 + * @receive_queue: received PDUs
3513 + * @write_queue: un-pushed pending writes
3514 + * @retry_timer: retry timer for various operations
3515 + * @err: connection error status
3516 + * @callback_lock: lock for opaque user context
3517 + * @user_data: opaque user context
3518 + * @rcv_nxt: next receive seq. #
3519 + * @copied_seq: head of yet unread data
3520 + * @rcv_wup: rcv_nxt on last window update sent
3521 + * @snd_nxt: next sequence we send
3522 + * @snd_una: first byte we want an ack for
3523 + * @write_seq: tail+1 of data held in send buffer
3524 */
3525 struct s3_conn {
3526 - struct net_device *dev; /* net device of with connection */
3527 - struct t3cdev *cdev; /* adapter t3cdev for net device */
3528 - unsigned long flags; /* see c3cn_flags below */
3529 - int tid; /* ID of TCP Control Block */
3530 - int qset; /* queue Set used by connection */
3531 - int mss_idx; /* Maximum Segment Size table index */
3532 - struct l2t_entry *l2t; /* ARP resolution for offload packets */
3533 - int wr_max; /* maximum in-flight writes */
3534 - int wr_avail; /* number of writes available */
3535 - int wr_unacked; /* writes since last request for */
3536 - /* completion notification */
3537 - struct sk_buff *wr_pending_head;/* head of pending write queue */
3538 - struct sk_buff *wr_pending_tail;/* tail of pending write queue */
3539 - struct sk_buff *ctrl_skb_cache; /* single entry cached skb for */
3540 - /* short-term control operations */
3541 - spinlock_t lock; /* connection status lock */
3542 - atomic_t refcnt; /* reference count on connection */
3543 - volatile unsigned int state; /* connection state */
3544 - struct sockaddr_in saddr; /* source IP/port address */
3545 - struct sockaddr_in daddr; /* destination IP/port address */
3546 - struct dst_entry *dst_cache; /* reference to destination route */
3547 - unsigned char shutdown; /* shutdown status */
3548 - struct sk_buff_head receive_queue;/* received PDUs */
3549 - struct sk_buff_head write_queue;/* un-pushed pending writes */
3550 -
3551 - struct timer_list retry_timer; /* retry timer for various operations */
3552 - int err; /* connection error status */
3553 - rwlock_t callback_lock; /* lock for opaque user context */
3554 - void *user_data; /* opaque user context */
3555 -
3556 - u32 rcv_nxt; /* what we want to receive next */
3557 - u32 copied_seq; /* head of yet unread data */
3558 - u32 rcv_wup; /* rcv_nxt on last window update sent */
3559 - u32 snd_nxt; /* next sequence we send */
3560 - u32 snd_una; /* first byte we want an ack for */
3561 -
3562 - u32 write_seq; /* tail+1 of data held in send buffer */
3563 -};
3564 -
3565 -/* Flags in c3cn->shutdown */
3566 -#define C3CN_RCV_SHUTDOWN 0x1
3567 -#define C3CN_SEND_SHUTDOWN 0x2
3568 -#define C3CN_SHUTDOWN_MASK (C3CN_RCV_SHUTDOWN | C3CN_SEND_SHUTDOWN)
3569 + struct net_device *dev;
3570 + struct t3cdev *cdev;
3571 + unsigned long flags;
3572 + int tid;
3573 + int qset;
3574 + int mss_idx;
3575 + struct l2t_entry *l2t;
3576 + int wr_max;
3577 + int wr_avail;
3578 + int wr_unacked;
3579 + struct sk_buff *wr_pending_head;
3580 + struct sk_buff *wr_pending_tail;
3581 + struct sk_buff *cpl_close;
3582 + struct sk_buff *cpl_abort_req;
3583 + struct sk_buff *cpl_abort_rpl;
3584 + spinlock_t lock;
3585 + atomic_t refcnt;
3586 + volatile unsigned int state;
3587 + struct sockaddr_in saddr;
3588 + struct sockaddr_in daddr;
3589 + struct dst_entry *dst_cache;
3590 + struct sk_buff_head receive_queue;
3591 + struct sk_buff_head write_queue;
3592 + struct timer_list retry_timer;
3593 + int err;
3594 + rwlock_t callback_lock;
3595 + void *user_data;
3596 +
3597 + u32 rcv_nxt;
3598 + u32 copied_seq;
3599 + u32 rcv_wup;
3600 + u32 snd_nxt;
3601 + u32 snd_una;
3602 + u32 write_seq;
3603 +};
3604
3605 /*
3606 - * connection state bitmap
3607 - */
3608 -#define C3CN_STATE_CLOSE 0x1
3609 -#define C3CN_STATE_SYN_SENT 0x2
3610 -#define C3CN_STATE_ESTABLISHED 0x4
3611 -#define C3CN_STATE_CLOSING 0x8
3612 -#define C3CN_STATE_ABORING 0x10
3613 -
3614 -#define C3CN_STATE_MASK 0xFF
3615 + * connection state
3616 + */
3617 +enum conn_states {
3618 + C3CN_STATE_CONNECTING = 1,
3619 + C3CN_STATE_ESTABLISHED,
3620 + C3CN_STATE_ACTIVE_CLOSE,
3621 + C3CN_STATE_PASSIVE_CLOSE,
3622 + C3CN_STATE_CLOSE_WAIT_1,
3623 + C3CN_STATE_CLOSE_WAIT_2,
3624 + C3CN_STATE_ABORTING,
3625 + C3CN_STATE_CLOSED,
3626 +};
3627
3628 -static inline unsigned int c3cn_in_state(const struct s3_conn *c3cn,
3629 - unsigned int states)
3630 +static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
3631 {
3632 - return states & c3cn->state;
3633 + return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
3634 +}
3635 +static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
3636 +{
3637 + return c3cn->state == C3CN_STATE_ESTABLISHED;
3638 }
3639
3640 /*
3641 @@ -108,37 +132,35 @@ static inline unsigned int c3cn_in_state
3642 enum c3cn_flags {
3643 C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */
3644 C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */
3645 - C3CN_TX_WAIT_IDLE, /* suspend Tx until in-flight data is ACKed */
3646 - C3CN_ABORT_SHUTDOWN, /* shouldn't send more abort requests */
3647 -
3648 C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */
3649 - C3CN_CLOSE_CON_REQUESTED, /* we've sent a close_conn_req */
3650 C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */
3651 - C3CN_CLOSE_NEEDED, /* need to be closed */
3652 - C3CN_DONE,
3653 + C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */
3654 };
3655
3656 -/*
3657 - * Per adapter data. Linked off of each Ethernet device port on the adapter.
3658 +/**
3659 + * struct cxgb3i_sdev_data - per-adapter data.
3660 + * Linked off each Ethernet device port on the adapter.
3661 * Also available via the t3cdev structure since we have pointers to our port
3662 * net_device's there ...
3663 + *
3664 + * @list: list head to link elements
3665 + * @cdev: t3cdev adapter
3666 + * @client: CPL client pointer
3667 + * @ports: array of adapter ports
3668 + * @sport_map_next: next index into the port map
3669 + * @sport_map: source port map
3670 */
3671 struct cxgb3i_sdev_data {
3672 - struct list_head list; /* links for list of all adapters */
3673 - struct t3cdev *cdev; /* adapter t3cdev */
3674 - struct cxgb3_client *client; /* CPL client pointer */
3675 - struct adap_ports *ports; /* array of adapter ports */
3676 - unsigned int rx_page_size; /* RX page size */
3677 - struct sk_buff_head deferq; /* queue for processing replies from */
3678 - /* worker thread context */
3679 - struct work_struct deferq_task; /* worker thread */
3680 + struct list_head list;
3681 + struct t3cdev *cdev;
3682 + struct cxgb3_client *client;
3683 + struct adap_ports ports;
3684 + unsigned int sport_map_next;
3685 + unsigned long sport_map[0];
3686 };
3687 #define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
3688 #define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
3689
3690 -/*
3691 - * Primary API routines.
3692 - */
3693 void cxgb3i_sdev_cleanup(void);
3694 int cxgb3i_sdev_init(cxgb3_cpl_handler_func *);
3695 void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
3696 @@ -147,20 +169,26 @@ void cxgb3i_sdev_remove(struct t3cdev *)
3697 struct s3_conn *cxgb3i_c3cn_create(void);
3698 int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
3699 void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
3700 -int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *, int);
3701 +int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
3702 void cxgb3i_c3cn_release(struct s3_conn *);
3703
3704 -/*
3705 - * Definitions for sk_buff state and ULP mode management.
3706 +/**
3707 + * struct cxgb3_skb_cb - control block for sk_buff state and ULP mode management.
3708 + *
3709 + * @flag: see C3CB_FLAG_* below
3710 + * @ulp_mode: ULP mode/submode of sk_buff
3711 + * @seq: tcp sequence number
3712 + * @ddigest: pdu data digest
3713 + * @pdulen: recovered pdu length
3714 + * @ulp_data: scratch area for ULP
3715 */
3716 -
3717 struct cxgb3_skb_cb {
3718 - __u8 flags; /* see C3CB_FLAG_* below */
3719 - __u8 ulp_mode; /* ULP mode/submode of sk_buff */
3720 - __u32 seq; /* sequence number */
3721 - __u32 ddigest; /* ULP rx_data_ddp selected field */
3722 - __u32 pdulen; /* ULP rx_data_ddp selected field */
3723 - __u8 ulp_data[16]; /* scratch area for ULP */
3724 + __u8 flags;
3725 + __u8 ulp_mode;
3726 + __u32 seq;
3727 + __u32 ddigest;
3728 + __u32 pdulen;
3729 + __u8 ulp_data[16];
3730 };
3731
3732 #define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0]))
3733 @@ -170,28 +198,14 @@ struct cxgb3_skb_cb {
3734 #define skb_ulp_pdulen(skb) (CXGB3_SKB_CB(skb)->pdulen)
3735 #define skb_ulp_data(skb) (CXGB3_SKB_CB(skb)->ulp_data)
3736
3737 -enum {
3738 +enum c3cb_flags {
3739 C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */
3740 C3CB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */
3741 - C3CB_FLAG_BARRIER = 1 << 2, /* set TX_WAIT_IDLE after sending */
3742 - C3CB_FLAG_COMPL = 1 << 4, /* request WR completion */
3743 + C3CB_FLAG_COMPL = 1 << 2, /* request WR completion */
3744 };
3745
3746 -/*
3747 - * Top-level CPL message processing used by most CPL messages that
3748 - * pertain to connections.
3749 - */
3750 -static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
3751 - struct sk_buff *),
3752 - struct s3_conn *c3cn,
3753 - struct sk_buff *skb)
3754 -{
3755 - spin_lock(&c3cn->lock);
3756 - fn(c3cn, skb);
3757 - spin_unlock(&c3cn->lock);
3758 -}
3759 -
3760 -/*
3761 +/**
3762 + * struct sge_opaque_hdr -
3763 * Opaque version of structure the SGE stores at skb->head of TX_DATA packets
3764 * and for which we must reserve space.
3765 */
3766 @@ -204,9 +218,6 @@ struct sge_opaque_hdr {
3767 #define TX_HEADER_LEN \
3768 (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
3769
3770 -void *cxgb3i_alloc_big_mem(unsigned int);
3771 -void cxgb3i_free_big_mem(void *);
3772 -
3773 /*
3774 * get and set private ip for iscsi traffic
3775 */
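
The enum conn_states introduced above replaces the old state bitmap and is
deliberately ordered: every state from C3CN_STATE_ACTIVE_CLOSE onward denotes
a connection on its way down, which is why c3cn_is_closing() is a single
comparison. A sketch of the intended use of the two state helpers; the
function below is illustrative only:

/* illustrative sketch, not part of this patch */
static void example_report_state(const struct s3_conn *c3cn)
{
	if (c3cn_is_established(c3cn))
		cxgb3i_log_info("tid %d established.\n", c3cn->tid);
	else if (c3cn_is_closing(c3cn))
		/* ACTIVE/PASSIVE_CLOSE, CLOSE_WAIT_1/2, ABORTING or CLOSED */
		cxgb3i_log_info("tid %d closing or closed.\n", c3cn->tid);
	else
		cxgb3i_log_info("tid %d still connecting.\n", c3cn->tid);
}
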
3776 --- a/drivers/scsi/cxgb3i/cxgb3i_ulp2.c
3777 +++ b/drivers/scsi/cxgb3i/cxgb3i_ulp2.c
3778 @@ -312,6 +312,7 @@ u32 cxgb3i_ddp_tag_reserve(struct cxgb3i
3779 page_idx, sgcnt, xferlen, ULP2_DDP_THRESHOLD);
3780 return RESERVED_ITT;
3781 }
3782 + return RESERVED_ITT;
3783
3784 gl = ddp_make_gl(xferlen, sgl, sgcnt, gfp);
3785 if (!gl) {
3786 @@ -380,8 +381,14 @@ void cxgb3i_ddp_tag_release(struct cxgb3
3787 if (idx < snic->tag_format.rsvd_mask) {
3788 struct cxgb3i_ddp_info *ddp = snic->ddp;
3789 struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
3790 - unsigned int npods = (gl->nelem + PPOD_PAGES_MAX - 1) >>
3791 - PPOD_PAGES_SHIFT;
3792 + unsigned int npods;
3793 +
3794 + if (!gl || !gl->nelem) {
3795 + cxgb3i_log_warn("release tag 0x%x, idx 0x%x, no gl.\n",
3796 + tag, idx);
3797 + return;
3798 + }
3799 + npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
3800
3801 cxgb3i_tag_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
3802 tag, idx, npods);
3803 @@ -469,14 +476,14 @@ static int cxgb3i_conn_read_pdu_skb(stru
3804 (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) ?
3805 ISCSI_SEGMENT_DGST_ERR : 0;
3806 if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
3807 - cxgb3i_ddp_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
3808 - "itt 0x%x.\n",
3809 + cxgb3i_ddp_debug("skb 0x%p, opcode 0x%x, data %u, "
3810 + "ddp'ed, itt 0x%x.\n",
3811 skb, hdr->opcode & ISCSI_OPCODE_MASK,
3812 tcp_conn->in.datalen, hdr->itt);
3813 segment->total_copied = segment->total_size;
3814 } else {
3815 - cxgb3i_ddp_debug("skb 0x%p, opcode 0x%x, data %u, not ddp'ed, "
3816 - "itt 0x%x.\n",
3817 + cxgb3i_ddp_debug("skb 0x%p, opcode 0x%x, data %u, "
3818 + "not ddp'ed, itt 0x%x.\n",
3819 skb, hdr->opcode & ISCSI_OPCODE_MASK,
3820 tcp_conn->in.datalen, hdr->itt);
3821 offset += sizeof(struct cpl_iscsi_hdr_norss);
3822 @@ -613,8 +620,7 @@ int cxgb3i_conn_ulp2_xmit(struct iscsi_c
3823 }
3824
3825 send_pdu:
3826 - err = cxgb3i_c3cn_send_pdus((struct s3_conn *)tcp_conn->sock,
3827 - skb, MSG_DONTWAIT | MSG_NOSIGNAL);
3828 + err = cxgb3i_c3cn_send_pdus((struct s3_conn *)tcp_conn->sock, skb);
3829
3830 if (err > 0) {
3831 int pdulen = hdrlen + datalen + padlen;
3832 @@ -758,7 +764,8 @@ int cxgb3i_adapter_ulp_init(struct cxgb3
3833 ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
3834 ppmax *
3835 (sizeof(struct cxgb3i_gather_list *) +
3836 - sizeof(struct sk_buff *)));
3837 + sizeof(struct sk_buff *)),
3838 + GFP_KERNEL);
3839 if (!ddp) {
3840 cxgb3i_log_warn("snic %s unable to alloc ddp ppod 0x%u, "
3841 "ddp disabled.\n", tdev->name, ppmax);
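
The npods value computed in the fixed release path above is a ceiling
division of gather-list elements into page pods. A sketch of the same
rounding, assuming PPOD_PAGES_MAX is 4 with PPOD_PAGES_SHIFT of 2 as
declared elsewhere in this driver:

/* illustrative sketch, not part of this patch */
static inline unsigned int example_npods(unsigned int nelem)
{
	/* round nelem up to a whole number of page pods,
	 * e.g. nelem = 5 gives (5 + 3) >> 2 = 2 pods
	 */
	return (nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
}
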
3842 --- a/drivers/scsi/cxgb3i/cxgb3i_ulp2.h
3843 +++ b/drivers/scsi/cxgb3i/cxgb3i_ulp2.h
3844 @@ -106,4 +106,27 @@ struct cpl_rx_data_ddp_norss {
3845 void cxgb3i_conn_closing(struct s3_conn *);
3846 void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
3847 void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
3848 +
3849 +/*
3850 + * large memory chunk allocation/release helpers:
3851 + * fall back to vmalloc() if kmalloc() fails; memory is zeroed on success
3852 + */
3853 +static inline void *cxgb3i_alloc_big_mem(unsigned int size, gfp_t gfp)
3854 +{
3855 + void *p = kmalloc(size, gfp);
3856 +
3857 + if (!p)
3858 + p = vmalloc(size);
3859 + if (p)
3860 + memset(p, 0, size);
3861 + return p;
3862 +}
3863 +
3864 +static inline void cxgb3i_free_big_mem(void *addr)
3865 +{
3866 + if (is_vmalloc_addr(addr))
3867 + vfree(addr);
3868 + else
3869 + kfree(addr);
3870 +}
3871 #endif
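
With the gfp_t parameter added to cxgb3i_alloc_big_mem(), each call site now
chooses its own allocation context, as cxgb3i_sdev_add() does with GFP_KERNEL.
A usage sketch for a source-port bitmap sized like the one in
cxgb3i_sdev_add(); the helper name and sizing below are illustrative:

/* illustrative sketch, not part of this patch */
static unsigned long *example_alloc_sport_map(unsigned int max_conn)
{
	/* one bit per connection, rounded up to whole unsigned longs */
	unsigned int size = DIV_ROUND_UP(max_conn, 8 * sizeof(unsigned long)) *
			    sizeof(unsigned long);

	/* tries kmalloc() first, falls back to vmalloc(); zeroed on success,
	 * must be released with cxgb3i_free_big_mem()
	 */
	return cxgb3i_alloc_big_mem(size, GFP_KERNEL);
}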