/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

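/* A hashtable of extant calls, keyed on connection and peer parameters so
 * that an incoming packet can be matched to its call.  Note that
 * DEFINE_HASHTABLE(name, 10) gives a table of 2^10 buckets.
 */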
static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	family,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to fold the field values together; they've already
	 * been converted to host byte order, so plain addition is fine.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
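	/* The CID is a connection number with a channel number in the bottom
	 * bits; fold the two parts in separately.
	 */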
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += family;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");
	switch (call->family) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->family,
				  call->conn->trans->local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void *localptr,
	sa_family_t family,
	const void *peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	_enter("");
	switch (family) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  family, localptr, addr_size,
				  peer_addr);
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->family == family &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

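	/* Poison the unlinked rb_node so that any erroneous use shows up */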
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
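	/* Place the top of the ACK window one whole receive window beyond the
	 * last DATA packet consumed.
	 */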
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_conn_parameters *cp,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, cp, trans, bundle, call, gfp);
	if (ret < 0) {
		/* Undo rxrpc_alloc_call() and the sock_hold() above, lest we
		 * leak the ACK window and a reference on the socket.
		 */
		sock_put(&rx->sk);
		kfree(call->acks_window);
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->family = rx->family;
	call->local = call->conn->params.local;
	switch (call->family) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->proto.epoch;
	call->service_id = call->conn->params.service_id;
	call->in_clientflag = call->conn->proto.in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->trans->peer->lock);
	hlist_add_head(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;

	_enter("%p,%d,%d,%lx",
	       rx, trans->debug_id, bundle ? bundle->debug_id : -1,
	       user_call_ID);

	call = rxrpc_alloc_client_call(rx, cp, trans, bundle, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_host_header *hdr)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	u32 call_id;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
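	/* A call on a connection that has a security class must have its
	 * security negotiated before it can be accepted.
	 */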
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
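			/* Fall through */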
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the call ID value, which is
		 * in host byte order here.
		 */
		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	hlist_add_head(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->family = rx->family;
	call->local = conn->trans->local;
	switch (call->family) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = conn->proto.in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
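			/* Fall through */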
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			pr_err("conn->avail_calls=%d\n", conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
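	/* Release the ref that the socket passed to the death timer (see
	 * rxrpc_release_call()).
	 */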
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

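			/* The bottom bit of a window slot is used as a flag,
			 * so mask it off to recover the sk_buff pointer.
			 */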
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state == RXRPC_CALL_DEAD)
				break;
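			/* Fall through and complain if the call refused to
			 * die.
			 */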
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}