/* drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include "socklnd.h"

int
ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
{
        int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr,
                                   &conn->ksnc_port);

        /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
        LASSERT(!conn->ksnc_closing);

        if (rc) {
                CERROR("Error %d getting sock peer IP\n", rc);
                return rc;
        }

        rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
        if (rc) {
                CERROR("Error %d getting sock local IP\n", rc);
                return rc;
        }

        return 0;
}

int
ksocknal_lib_zc_capable(ksock_conn_t *conn)
{
        int caps = conn->ksnc_sock->sk->sk_route_caps;

        if (conn->ksnc_proto == &ksocknal_protocol_v1x)
                return 0;

        /*
         * ZC if the socket supports scatter/gather and doesn't need software
         * checksums
         */
        return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK));
}
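
/*
 * NB the zero-copy send path below hands page references straight to
 * ->sendpage(), i.e. the payload is never copied into the socket buffer,
 * so it is only safe when the route can do scatter/gather DMA and
 * hardware checksumming -- which is what the NETIF_F_SG and
 * NETIF_F_CSUM_MASK test above guards.
 */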

int
ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        int nob;
        int rc;

        if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
            conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
            tx->tx_nob == tx->tx_resid && /* first sending */
            !tx->tx_msg.ksm_csum) /* not checksummed */
                ksocknal_lib_csum_tx(tx);

        /*
         * NB we can't trust socket ops to either consume our iovs
         * or leave them alone.
         */
        {
#if SOCKNAL_SINGLE_FRAG_TX
                struct kvec scratch;
                struct kvec *scratchiov = &scratch;
                unsigned int niov = 1;
#else
                struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int niov = tx->tx_niov;
#endif
                struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
                int i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i] = tx->tx_iov[i];
                        nob += scratchiov[i].iov_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
        }
        return rc;
}
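
/*
 * NB kernel_sendmsg() may accept fewer than 'nob' bytes or return a
 * negative errno; the raw return value is handed back unmodified so the
 * caller can account for a partial send and retry the residual later.
 */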

int
ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t *kiov = tx->tx_kiov;
        int rc;
        int nob;

        /* Not NOOP message */
        LASSERT(tx->tx_lnetmsg);

        /*
         * NB we can't trust socket ops to either consume our iovs
         * or leave them alone.
         */
        if (tx->tx_msg.ksm_zc_cookies[0]) {
                /* Zero copy is enabled */
                struct sock *sk = sock->sk;
                struct page *page = kiov->kiov_page;
                int offset = kiov->kiov_offset;
                int fragsize = kiov->kiov_len;
                int msgflg = MSG_DONTWAIT;

                CDEBUG(D_NET, "page %p + offset %x for %d\n",
                       page, offset, kiov->kiov_len);

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    fragsize < tx->tx_resid)
                        msgflg |= MSG_MORE;

                if (sk->sk_prot->sendpage) {
                        rc = sk->sk_prot->sendpage(sk, page,
                                                   offset, fragsize, msgflg);
                } else {
                        rc = tcp_sendpage(sk, page, offset, fragsize, msgflg);
                }
        } else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
                struct kvec scratch;
                struct kvec *scratchiov = &scratch;
                unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
                struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int niov = tx->tx_nkiov;
#endif
                struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
                int i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);

                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }
        return rc;
}
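
/*
 * NB the fallback (non zero-copy) branch above must kmap() every
 * fragment because the kiov describes raw pages that may live in
 * highmem, while kernel_sendmsg() needs kernel virtual addresses in its
 * kvec; the matching kunmap() loop runs regardless of whether the send
 * succeeded, so no mappings are leaked.
 */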

void
ksocknal_lib_eager_ack(ksock_conn_t *conn)
{
        int opt = 1;
        struct socket *sock = conn->ksnc_sock;

        /*
         * Remind the socket to ACK eagerly. If I don't, the socket might
         * think I'm about to send something it could piggy-back the ACK
         * on, introducing delay in completing zero-copy sends in my
         * peer.
         */
        kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt,
                          sizeof(opt));
}
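
/*
 * NB TCP_QUICKACK is not a sticky option: the stack reverts to delayed
 * ACKs on its own, which is why this is issued as a per-call reminder
 * rather than set once at socket setup time.
 */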

int
ksocknal_lib_recv_iov(ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
        struct kvec scratch;
        struct kvec *scratchiov = &scratch;
        unsigned int niov = 1;
#else
        struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int niov = conn->ksnc_rx_niov;
#endif
        struct kvec *iov = conn->ksnc_rx_iov;
        struct msghdr msg = {
                .msg_flags = 0
        };
        int nob;
        int i;
        int rc;
        int fragnob;
        int sum;
        __u32 saved_csum;

        /*
         * NB we can't trust socket ops to either consume our iovs
         * or leave them alone.
         */
        LASSERT(niov > 0);

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT(nob <= conn->ksnc_rx_nob_wanted);

        rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
                            MSG_DONTWAIT);

        saved_csum = 0;
        if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
                saved_csum = conn->ksnc_msg.ksm_csum;
                conn->ksnc_msg.ksm_csum = 0;
        }

        if (saved_csum) {
                /* accumulate checksum */
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT(i < niov);

                        fragnob = iov[i].iov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           iov[i].iov_base, fragnob);
                }
                conn->ksnc_msg.ksm_csum = saved_csum;
        }

        return rc;
}
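
/*
 * NB the checksum above is accumulated from the original rx_iov entries
 * rather than from scratchiov; both describe the same buffers (the
 * scratch array is a straight copy), which exists only because the
 * socket ops may scribble on the kvec they are given.
 */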

static void
ksocknal_lib_kiov_vunmap(void *addr)
{
        if (!addr)
                return;

        vunmap(addr);
}

static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
                       struct kvec *iov, struct page **pages)
{
        void *addr;
        int nob;
        int i;

        if (!*ksocknal_tunables.ksnd_zc_recv || !pages)
                return NULL;

        LASSERT(niov <= LNET_MAX_IOV);

        if (niov < 2 ||
            niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
                return NULL;

        for (nob = i = 0; i < niov; i++) {
                if ((kiov[i].kiov_offset && i > 0) ||
                    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
                        return NULL;

                pages[i] = kiov[i].kiov_page;
                nob += kiov[i].kiov_len;
        }

        addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
        if (!addr)
                return NULL;

        iov->iov_base = addr + kiov[0].kiov_offset;
        iov->iov_len = nob;

        return addr;
}
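
/*
 * NB vmap() can only yield one virtually contiguous region when the
 * fragments already tile whole pages: only the first fragment may start
 * at a non-zero offset and only the last may end short of PAGE_SIZE,
 * which is what the loop above checks before committing to the mapping.
 */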

int
ksocknal_lib_recv_kiov(ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct kvec scratch;
        struct kvec *scratchiov = &scratch;
        struct page **pages = NULL;
        unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
        struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
        unsigned int niov = conn->ksnc_rx_nkiov;
#endif
        lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
        struct msghdr msg = {
                .msg_flags = 0
        };
        int nob;
        int i;
        int rc;
        void *base;
        void *addr;
        int sum;
        int fragnob;
        int n;

        /*
         * NB we can't trust socket ops to either consume our iovs
         * or leave them alone.
         */
        addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
        if (addr) {
                nob = scratchiov[0].iov_len;
                n = 1;
        } else {
                for (nob = i = 0; i < niov; i++) {
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                }
                n = niov;
        }

        LASSERT(nob <= conn->ksnc_rx_nob_wanted);

        rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov,
                            n, nob, MSG_DONTWAIT);

        if (conn->ksnc_msg.ksm_csum) {
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT(i < niov);

                        /*
                         * Dang! have to kmap again because I have nowhere to
                         * stash the mapped address. But by doing it while the
                         * page is still mapped, the kernel just bumps the map
                         * count and returns me the address it stashed.
                         */
                        base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                        fragnob = kiov[i].kiov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           base, fragnob);

                        kunmap(kiov[i].kiov_page);
                }
        }

        if (addr) {
                ksocknal_lib_kiov_vunmap(addr);
        } else {
                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }

        return rc;
}

void
ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
        int i;
        __u32 csum;
        void *base;

        LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
        LASSERT(tx->tx_conn);
        LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

        tx->tx_msg.ksm_csum = 0;

        csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
                             tx->tx_iov[0].iov_len);

        if (tx->tx_kiov) {
                for (i = 0; i < tx->tx_nkiov; i++) {
                        base = kmap(tx->tx_kiov[i].kiov_page) +
                               tx->tx_kiov[i].kiov_offset;

                        csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);

                        kunmap(tx->tx_kiov[i].kiov_page);
                }
        } else {
                for (i = 1; i < tx->tx_niov; i++)
                        csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
                                             tx->tx_iov[i].iov_len);
        }

        if (*ksocknal_tunables.ksnd_inject_csum_error) {
                csum++;
                *ksocknal_tunables.ksnd_inject_csum_error = 0;
        }

        tx->tx_msg.ksm_csum = csum;
}
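
/*
 * NB the iov loop starts at index 1 because tx_iov[0] is the ksock_msg
 * header itself: it has already been folded into 'csum' by the initial
 * ksocknal_csum(~0, ...) call, with ksm_csum zeroed first so the field
 * never checksums itself.
 */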

int
ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
{
        struct socket *sock = conn->ksnc_sock;
        int len;
        int rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc) {
                LASSERT(conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return -ESHUTDOWN;
        }

        rc = lnet_sock_getbuf(sock, txmem, rxmem);
        if (!rc) {
                len = sizeof(*nagle);
                rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
                                       (char *)nagle, &len);
        }

        ksocknal_connsock_decref(conn);

        if (!rc)
                *nagle = !*nagle;
        else
                *txmem = *rxmem = *nagle = 0;

        return rc;
}
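
/*
 * NB the TCP_NODELAY value is inverted above because the caller wants
 * to know whether Nagle is enabled, and TCP_NODELAY means the exact
 * opposite.
 */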

int
ksocknal_lib_setup_sock(struct socket *sock)
{
        int rc;
        int option;
        int keep_idle;
        int keep_intvl;
        int keep_count;
        int do_keepalive;
        struct linger linger;

        sock->sk->sk_allocation = GFP_NOFS;

        /*
         * Ensure this socket aborts active sends immediately when we close
         * it.
         */
        linger.l_onoff = 0;
        linger.l_linger = 0;

        rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger,
                               sizeof(linger));
        if (rc) {
                CERROR("Can't set SO_LINGER: %d\n", rc);
                return rc;
        }

        option = -1;
        rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option,
                               sizeof(option));
        if (rc) {
                CERROR("Can't set TCP_LINGER2: %d\n", rc);
                return rc;
        }

        if (!*ksocknal_tunables.ksnd_nagle) {
                option = 1;

                rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
                                       (char *)&option, sizeof(option));
                if (rc) {
                        CERROR("Can't disable nagle: %d\n", rc);
                        return rc;
                }
        }

        rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
                              *ksocknal_tunables.ksnd_rx_buffer_size);
        if (rc) {
                CERROR("Can't set buffer tx %d, rx %d buffers: %d\n",
                       *ksocknal_tunables.ksnd_tx_buffer_size,
                       *ksocknal_tunables.ksnd_rx_buffer_size, rc);
                return rc;
        }

        /* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */

        /* snapshot tunables */
        keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
        keep_count = *ksocknal_tunables.ksnd_keepalive_count;
        keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

        do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);

        option = (do_keepalive ? 1 : 0);
        rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option,
                               sizeof(option));
        if (rc) {
                CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
                return rc;
        }

        if (!do_keepalive)
                return 0;

        rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle,
                               sizeof(keep_idle));
        if (rc) {
                CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
                return rc;
        }

        rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
                               (char *)&keep_intvl, sizeof(keep_intvl));
        if (rc) {
                CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
                return rc;
        }

        rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count,
                               sizeof(keep_count));
        if (rc) {
                CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
                return rc;
        }

        return 0;
}
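
/*
 * NB the keepalive tunables map directly onto the TCP socket options:
 * TCP_KEEPIDLE and TCP_KEEPINTVL are expressed in seconds, TCP_KEEPCNT
 * is a probe count, and none of them have any effect unless
 * SO_KEEPALIVE has been switched on first (as is done above).
 */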

void
ksocknal_lib_push_conn(ksock_conn_t *conn)
{
        struct sock *sk;
        struct tcp_sock *tp;
        int nonagle;
        int val = 1;
        int rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc) /* being shut down */
                return;

        sk = conn->ksnc_sock->sk;
        tp = tcp_sk(sk);

        lock_sock(sk);
        nonagle = tp->nonagle;
        tp->nonagle = 1;
        release_sock(sk);

        rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
                               (char *)&val, sizeof(val));
        LASSERT(!rc);

        lock_sock(sk);
        tp->nonagle = nonagle;
        release_sock(sk);

        ksocknal_connsock_decref(conn);
}

/*
 * socket callbacks in Linux
 */
static void
ksocknal_data_ready(struct sock *sk)
{
        ksock_conn_t *conn;

        /* interleave correctly with closing sockets... */
        LASSERT(!in_irq());
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        if (!conn) { /* raced with ksocknal_terminate_conn */
                LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
                sk->sk_data_ready(sk);
        } else {
                ksocknal_read_callback(conn);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}

static void
ksocknal_write_space(struct sock *sk)
{
        ksock_conn_t *conn;
        int wspace;
        int min_wspace;

        /* interleave correctly with closing sockets... */
        LASSERT(!in_irq());
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        wspace = sk_stream_wspace(sk);
        min_wspace = sk_stream_min_wspace(sk);

        CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
               sk, wspace, min_wspace, conn,
               !conn ? "" : (conn->ksnc_tx_ready ?
                             " ready" : " blocked"),
               !conn ? "" : (conn->ksnc_tx_scheduled ?
                             " scheduled" : " idle"),
               !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ?
                             " empty" : " queued"));

        if (!conn) { /* raced with ksocknal_terminate_conn */
                LASSERT(sk->sk_write_space != &ksocknal_write_space);
                sk->sk_write_space(sk);

                read_unlock(&ksocknal_data.ksnd_global_lock);
                return;
        }

        if (wspace >= min_wspace) { /* got enough space */
                ksocknal_write_callback(conn);

                /*
                 * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
                 * ENOMEM check in ksocknal_transmit is race-free (think about
                 * it).
                 */
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}

void
ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
{
        conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
        conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}

void
ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
{
        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = ksocknal_data_ready;
        sock->sk->sk_write_space = ksocknal_write_space;
}

void
ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
{
        /*
         * Remove conn's network callbacks.
         * NB I _have_ to restore the callback, rather than storing a noop,
         * since the socket could survive past this module being unloaded!!
         */
        sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
        sock->sk->sk_write_space = conn->ksnc_saved_write_space;

        /*
         * A callback could be in progress already; they hold a read lock
         * on ksnd_global_lock (to serialise with me) and NOOP if
         * sk_user_data is NULL.
         */
        sock->sk->sk_user_data = NULL;
}

int
ksocknal_lib_memory_pressure(ksock_conn_t *conn)
{
        int rc = 0;
        ksock_sched_t *sched;

        sched = conn->ksnc_scheduler;
        spin_lock_bh(&sched->kss_lock);

        if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
            !conn->ksnc_tx_ready) {
                /*
                 * SOCK_NOSPACE is set when the socket fills
                 * and cleared in the write_space callback
                 * (which also sets ksnc_tx_ready). If
                 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
                 * zero, I didn't fill the socket and
                 * write_space won't reschedule me, so I
                 * return -ENOMEM to get my caller to retry
                 * after a timeout
                 */
                rc = -ENOMEM;
        }

        spin_unlock_bh(&sched->kss_lock);

        return rc;
}