/* RxRPC packet reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"

/*
 * queue a packet for recvmsg to pass to userspace
 * - the caller must hold a lock on call->lock
 * - must not be called with interrupts disabled (sk_filter() disables BH's)
 * - eats the packet whether successful or not
 * - there must be just one reference to the packet, which the caller passes to
 *   this function
 */
int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
                        bool force, bool terminal)
{
        struct rxrpc_skb_priv *sp;
        struct rxrpc_sock *rx = call->socket;
        struct sock *sk;
        int ret;

        _enter(",,%d,%d", force, terminal);

        ASSERT(!irqs_disabled());

        sp = rxrpc_skb(skb);
        ASSERTCMP(sp->call, ==, call);

        /* if we've already posted the terminal message for a call, then we
         * don't post any more */
        if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
                _debug("already terminated");
                ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
                rxrpc_free_skb(skb);
                return 0;
        }

        sk = &rx->sk;

        if (!force) {
                /* cast skb->rcvbuf to unsigned... It's pointless, but
                 * reduces number of warnings when compiling with -W
                 * --ANK */
//              ret = -ENOBUFS;
//              if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
//                  (unsigned int) sk->sk_rcvbuf)
//                      goto out;

                ret = sk_filter(sk, skb);
                if (ret < 0)
                        goto out;
        }

        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
            !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
            call->socket->sk.sk_state != RXRPC_CLOSE) {
                skb->destructor = rxrpc_packet_destructor;
                skb->dev = NULL;
                skb->sk = sk;
                atomic_add(skb->truesize, &sk->sk_rmem_alloc);

                if (terminal) {
                        _debug("<<<< TERMINAL MESSAGE >>>>");
                        set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
                }

                /* allow interception by a kernel service */
                if (skb->mark == RXRPC_SKB_MARK_NEW_CALL &&
                    rx->notify_new_call) {
                        spin_unlock_bh(&sk->sk_receive_queue.lock);
                        skb_queue_tail(&call->knlrecv_queue, skb);
                        rx->notify_new_call(&rx->sk);
                } else if (call->notify_rx) {
                        spin_unlock_bh(&sk->sk_receive_queue.lock);
                        skb_queue_tail(&call->knlrecv_queue, skb);
                        call->notify_rx(&rx->sk, call, call->user_call_ID);
                } else {
                        _net("post skb %p", skb);
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                        spin_unlock_bh(&sk->sk_receive_queue.lock);

                        if (!sock_flag(sk, SOCK_DEAD))
                                sk->sk_data_ready(sk);
                }
                skb = NULL;
        } else {
                spin_unlock_bh(&sk->sk_receive_queue.lock);
        }
        ret = 0;

out:
        rxrpc_free_skb(skb);

        _leave(" = %d", ret);
        return ret;
}
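
/*
 * Editor's note on rxrpc_queue_rcv_skb() above: an accepted packet is charged
 * to the socket's receive buffer (sk_rmem_alloc) and given
 * rxrpc_packet_destructor, which presumably reverses that accounting when the
 * skb is eventually freed.  Delivery then takes one of three routes: a kernel
 * service's new-call notification handler (notify_new_call), a kernel
 * service's per-call notify_rx handler, or the socket receive queue for
 * userspace recvmsg().
 */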

/*
 * process a DATA packet, posting the packet to the appropriate queue
 * - eats the packet if successful
 */
static int rxrpc_fast_process_data(struct rxrpc_call *call,
                                   struct sk_buff *skb, u32 seq)
{
        struct rxrpc_skb_priv *sp;
        bool terminal;
        int ret, ackbit, ack;
        u32 serial;
        u16 skew;
        u8 flags;

        _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);

        sp = rxrpc_skb(skb);
        ASSERTCMP(sp->call, ==, NULL);
        flags = sp->hdr.flags;
        serial = sp->hdr.serial;
        skew = skb->priority;

        spin_lock(&call->lock);

        if (call->state > RXRPC_CALL_COMPLETE)
                goto discard;

        ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
        ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
        ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);

        if (seq < call->rx_data_post) {
                _debug("dup #%u [-%u]", seq, call->rx_data_post);
                ack = RXRPC_ACK_DUPLICATE;
                ret = -ENOBUFS;
                goto discard_and_ack;
        }

        /* we may already have the packet in the out of sequence queue */
        ackbit = seq - (call->rx_data_eaten + 1);
        ASSERTCMP(ackbit, >=, 0);
        if (__test_and_set_bit(ackbit, call->ackr_window)) {
                _debug("dup oos #%u [%u,%u]",
                       seq, call->rx_data_eaten, call->rx_data_post);
                ack = RXRPC_ACK_DUPLICATE;
                goto discard_and_ack;
        }

        if (seq >= call->ackr_win_top) {
                _debug("exceed #%u [%u]", seq, call->ackr_win_top);
                __clear_bit(ackbit, call->ackr_window);
                ack = RXRPC_ACK_EXCEEDS_WINDOW;
                goto discard_and_ack;
        }

        if (seq == call->rx_data_expect) {
                clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
                call->rx_data_expect++;
        } else if (seq > call->rx_data_expect) {
                _debug("oos #%u [%u]", seq, call->rx_data_expect);
                call->rx_data_expect = seq + 1;
                if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
                        ack = RXRPC_ACK_OUT_OF_SEQUENCE;
                        goto enqueue_and_ack;
                }
                goto enqueue_packet;
        }

        if (seq != call->rx_data_post) {
                _debug("ahead #%u [%u]", seq, call->rx_data_post);
                goto enqueue_packet;
        }

        if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
                goto protocol_error;

        /* if the packet needs security processing, then it goes down the
         * slow path */
        if (call->conn->security_ix)
                goto enqueue_packet;

        sp->call = call;
        rxrpc_get_call_for_skb(call, skb);
        terminal = ((flags & RXRPC_LAST_PACKET) &&
                    !(flags & RXRPC_CLIENT_INITIATED));
        ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOBUFS) {
                        __clear_bit(ackbit, call->ackr_window);
                        ack = RXRPC_ACK_NOSPACE;
                        goto discard_and_ack;
                }
                goto out;
        }

        skb = NULL;
        sp = NULL;

        _debug("post #%u", seq);
        ASSERTCMP(call->rx_data_post, ==, seq);
        call->rx_data_post++;

        if (flags & RXRPC_LAST_PACKET)
                set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);

        /* if we've reached an out of sequence packet then we need to drain
         * that queue into the socket Rx queue now */
        if (call->rx_data_post == call->rx_first_oos) {
                _debug("drain rx oos now");
                read_lock(&call->state_lock);
                if (call->state < RXRPC_CALL_COMPLETE &&
                    !test_and_set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events))
                        rxrpc_queue_call(call);
                read_unlock(&call->state_lock);
        }

        spin_unlock(&call->lock);
        atomic_inc(&call->ackr_not_idle);
        rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial, false);
        _leave(" = 0 [posted]");
        return 0;

protocol_error:
        ret = -EBADMSG;
out:
        spin_unlock(&call->lock);
        _leave(" = %d", ret);
        return ret;

discard_and_ack:
        _debug("discard and ACK packet %p", skb);
        __rxrpc_propose_ACK(call, ack, skew, serial, true);
discard:
        spin_unlock(&call->lock);
        rxrpc_free_skb(skb);
        _leave(" = 0 [discarded]");
        return 0;

enqueue_and_ack:
        __rxrpc_propose_ACK(call, ack, skew, serial, true);
enqueue_packet:
        _net("defer skb %p", skb);
        spin_unlock(&call->lock);
        skb_queue_tail(&call->rx_queue, skb);
        atomic_inc(&call->ackr_not_idle);
        read_lock(&call->state_lock);
        if (call->state < RXRPC_CALL_DEAD)
                rxrpc_queue_call(call);
        read_unlock(&call->state_lock);
        _leave(" = 0 [queued]");
        return 0;
}
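
/*
 * Editor's note on the windowing in rxrpc_fast_process_data() above: received
 * DATA sequence numbers are tracked in the ackr_window bitmap, indexed
 * relative to the last packet consumed by recvmsg (bit 0 corresponds to
 * rx_data_eaten + 1).  Packets below rx_data_post or whose bit is already set
 * are ACKed as duplicates, packets at or beyond ackr_win_top are ACKed as
 * exceeding the window, and packets ahead of the next in-order sequence
 * number are parked on call->rx_queue for later handling in process context.
 */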

/*
 * assume an implicit ACKALL of the transmission phase of a client socket upon
 * reception of the first reply packet
 */
static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
{
        write_lock_bh(&call->state_lock);

        switch (call->state) {
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
                call->acks_latest = serial;

                _debug("implicit ACKALL %%%u", call->acks_latest);
                set_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events);
                write_unlock_bh(&call->state_lock);

                if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
                        clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
                        clear_bit(RXRPC_CALL_EV_RESEND, &call->events);
                        clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
                }
                break;

        default:
                write_unlock_bh(&call->state_lock);
                break;
        }
}

/*
 * post an incoming packet to the nominated call to deal with
 * - must get rid of the sk_buff, either by freeing it or by queuing it
 */
void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        __be32 wtmp;
        u32 abort_code;

        _enter("%p,%p", call, skb);

        ASSERT(!irqs_disabled());

#if 0 // INJECT RX ERROR
        if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
                static int skip = 0;
                if (++skip == 3) {
                        printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
                        skip = 0;
                        goto free_packet;
                }
        }
#endif

        /* request ACK generation for any ACK or DATA packet that requests
         * it */
        if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
                _proto("ACK Requested on %%%u", sp->hdr.serial);
                rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
                                  skb->priority, sp->hdr.serial, false);
        }

        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_ABORT:
                _debug("abort");

                if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
                        goto protocol_error;

                abort_code = ntohl(wtmp);
                _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

                if (__rxrpc_set_call_completion(call,
                                                RXRPC_CALL_REMOTELY_ABORTED,
                                                abort_code, ECONNABORTED)) {
                        set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
                        rxrpc_queue_call(call);
                }
                goto free_packet;

        case RXRPC_PACKET_TYPE_BUSY:
                _proto("Rx BUSY %%%u", sp->hdr.serial);

                if (rxrpc_conn_is_service(call->conn))
                        goto protocol_error;

                write_lock_bh(&call->state_lock);
                switch (call->state) {
                case RXRPC_CALL_CLIENT_SEND_REQUEST:
                        __rxrpc_set_call_completion(call,
                                                    RXRPC_CALL_SERVER_BUSY,
                                                    0, EBUSY);
                        set_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
                        rxrpc_queue_call(call);
                        /* fall through */
                case RXRPC_CALL_SERVER_BUSY:
                        goto free_packet_unlock;
                default:
                        goto protocol_error_locked;
                }

        default:
                _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
                goto protocol_error;

        case RXRPC_PACKET_TYPE_DATA:
                _proto("Rx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);

                if (sp->hdr.seq == 0)
                        goto protocol_error;

                call->ackr_prev_seq = sp->hdr.seq;

                /* received data implicitly ACKs all of the request packets we
                 * sent when we're acting as a client */
                if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
                        rxrpc_assume_implicit_ackall(call, sp->hdr.serial);

                switch (rxrpc_fast_process_data(call, skb, sp->hdr.seq)) {
                case 0:
                        skb = NULL;
                        goto done;

                default:
                        BUG();

                        /* data packet received beyond the last packet */
                case -EBADMSG:
                        goto protocol_error;
                }

        case RXRPC_PACKET_TYPE_ACKALL:
        case RXRPC_PACKET_TYPE_ACK:
                /* ACK processing is done in process context */
                read_lock_bh(&call->state_lock);
                if (call->state < RXRPC_CALL_DEAD) {
                        skb_queue_tail(&call->rx_queue, skb);
                        rxrpc_queue_call(call);
                        skb = NULL;
                }
                read_unlock_bh(&call->state_lock);
                goto free_packet;
        }

protocol_error:
        _debug("protocol error");
        write_lock_bh(&call->state_lock);
protocol_error_locked:
        if (__rxrpc_abort_call(call, RX_PROTOCOL_ERROR, EPROTO))
                rxrpc_queue_call(call);
free_packet_unlock:
        write_unlock_bh(&call->state_lock);
free_packet:
        rxrpc_free_skb(skb);
done:
        _leave("");
}
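
/*
 * Editor's note on the dispatch in rxrpc_fast_process_packet() above: ABORT,
 * BUSY and in-sequence DATA packets are handled immediately in the caller's
 * context, whereas ACK and ACKALL packets are always queued on call->rx_queue
 * and left to the call's work item (via rxrpc_queue_call()) to process.
 */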

/*
 * split up a jumbo data packet
 */
static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
                                       struct sk_buff *jumbo)
{
        struct rxrpc_jumbo_header jhdr;
        struct rxrpc_skb_priv *sp;
        struct sk_buff *part;

        _enter(",{%u,%u}", jumbo->data_len, jumbo->len);

        sp = rxrpc_skb(jumbo);

        do {
                sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;

                /* make a clone to represent the first subpacket in what's left
                 * of the jumbo packet */
                part = skb_clone(jumbo, GFP_ATOMIC);
                if (!part) {
                        /* simply ditch the tail in the event of ENOMEM */
                        pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
                        break;
                }
                rxrpc_new_skb(part);

                pskb_trim(part, RXRPC_JUMBO_DATALEN);

                if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
                        goto protocol_error;

                if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
                        goto protocol_error;
                if (!pskb_pull(jumbo, sizeof(jhdr)))
                        BUG();

                sp->hdr.seq += 1;
                sp->hdr.serial += 1;
                sp->hdr.flags = jhdr.flags;
                sp->hdr._rsvd = ntohs(jhdr._rsvd);

                _proto("Rx DATA Jumbo %%%u", sp->hdr.serial - 1);

                rxrpc_fast_process_packet(call, part);
                part = NULL;

        } while (sp->hdr.flags & RXRPC_JUMBO_PACKET);

        rxrpc_fast_process_packet(call, jumbo);
        _leave("");
        return;

protocol_error:
        _debug("protocol error");
        rxrpc_free_skb(part);
        rxrpc_free_skb(jumbo);
        if (rxrpc_abort_call(call, RX_PROTOCOL_ERROR, EPROTO))
                rxrpc_queue_call(call);
        _leave("");
}
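
/*
 * Editor's note on the jumbo splitting above: each subpacket except the last
 * is extracted by cloning what remains of the jumbo skb, trimming the clone
 * to RXRPC_JUMBO_DATALEN and then pulling that much data plus the secondary
 * jumbo header off the front of the original.  Subpackets carry consecutive
 * sequence and serial numbers, so sp->hdr.seq and sp->hdr.serial are simply
 * incremented for each one.
 */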

/*
 * post an incoming packet to the appropriate call/socket to deal with
 * - must get rid of the sk_buff, either by freeing it or by queuing it
 */
static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
                                      struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp;

        _enter("%p,%p", call, skb);

        sp = rxrpc_skb(skb);

        _debug("extant call [%d]", call->state);

        read_lock(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_DEAD:
                goto dead_call;

        case RXRPC_CALL_COMPLETE:
                switch (call->completion) {
                case RXRPC_CALL_LOCALLY_ABORTED:
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT,
                                              &call->events)) {
                                rxrpc_queue_call(call);
                                goto free_unlock;
                        }
                default:
                        goto dead_call;
                case RXRPC_CALL_SUCCEEDED:
                        if (rxrpc_conn_is_service(call->conn))
                                goto dead_call;
                        goto resend_final_ack;
                }

        case RXRPC_CALL_CLIENT_FINAL_ACK:
                goto resend_final_ack;

        default:
                break;
        }

        read_unlock(&call->state_lock);
        rxrpc_get_call(call);

        if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
            sp->hdr.flags & RXRPC_JUMBO_PACKET)
                rxrpc_process_jumbo_packet(call, skb);
        else
                rxrpc_fast_process_packet(call, skb);

        rxrpc_put_call(call);
        goto done;

resend_final_ack:
        _debug("final ack again");
        rxrpc_get_call(call);
        set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
        rxrpc_queue_call(call);
        goto free_unlock;

dead_call:
        if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
                skb->priority = RX_CALL_DEAD;
                rxrpc_reject_packet(call->conn->params.local, skb);
                goto unlock;
        }
free_unlock:
        rxrpc_free_skb(skb);
unlock:
        read_unlock(&call->state_lock);
done:
        _leave("");
}
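
/*
 * Editor's note on the routing in rxrpc_post_packet_to_call() above: packets
 * for a dead call are rejected with RX_CALL_DEAD unless they are themselves
 * ABORTs, a successfully completed client call (or one still awaiting its
 * final ACK) re-queues the final ACK, and everything else goes to the fast
 * path, with jumbo DATA packets being split first.
 */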

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission.
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
                                      struct sk_buff *skb)
{
        _enter("%p,%p", conn, skb);

        skb_queue_tail(&conn->rx_queue, skb);
        rxrpc_queue_conn(conn);
}

/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
                                       struct sk_buff *skb)
{
        _enter("%p,%p", local, skb);

        skb_queue_tail(&local->event_queue, skb);
        rxrpc_queue_local(local);
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
        struct rxrpc_wire_header whdr;

        /* dig out the RxRPC connection details */
        if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
                return -EBADMSG;
        if (!pskb_pull(skb, sizeof(whdr)))
                BUG();

        memset(sp, 0, sizeof(*sp));
        sp->hdr.epoch = ntohl(whdr.epoch);
        sp->hdr.cid = ntohl(whdr.cid);
        sp->hdr.callNumber = ntohl(whdr.callNumber);
        sp->hdr.seq = ntohl(whdr.seq);
        sp->hdr.serial = ntohl(whdr.serial);
        sp->hdr.flags = whdr.flags;
        sp->hdr.type = whdr.type;
        sp->hdr.userStatus = whdr.userStatus;
        sp->hdr.securityIndex = whdr.securityIndex;
        sp->hdr._rsvd = ntohs(whdr._rsvd);
        sp->hdr.serviceId = ntohs(whdr.serviceId);
        return 0;
}
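
/*
 * Editor's note on the header extraction above: all multi-byte fields of the
 * RxRPC wire header arrive in network byte order and are converted to host
 * order in the skb's private area; the header itself is then pulled off the
 * skb, so later processing sees the payload starting at offset 0.
 */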

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * The socket is locked by the caller and this prevents the socket from being
 * shut down and the local endpoint from going away, thus sk_user_data will not
 * be cleared until this function returns.
 */
void rxrpc_data_ready(struct sock *sk)
{
        struct rxrpc_connection *conn;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_local *local = sk->sk_user_data;
        struct sk_buff *skb;
        int ret, skew;

        _enter("%p", sk);

        ASSERT(!irqs_disabled());

        skb = skb_recv_datagram(sk, 0, 1, &ret);
        if (!skb) {
                if (ret == -EAGAIN)
                        return;
                _debug("UDP socket error %d", ret);
                return;
        }

        rxrpc_new_skb(skb);

        _net("recv skb %p", skb);

        /* we'll probably need to checksum it (didn't call sock_recvmsg) */
        if (skb_checksum_complete(skb)) {
                rxrpc_free_skb(skb);
                __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
                _leave(" [CSUM failed]");
                return;
        }

        __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

        /* The socket buffer we have is owned by UDP, with UDP's data all over
         * it, but we really want our own data there.
         */
        skb_orphan(skb);
        sp = rxrpc_skb(skb);

        _net("Rx UDP packet from %08x:%04hu",
             ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));

        /* dig out the RxRPC connection details */
        if (rxrpc_extract_header(sp, skb) < 0)
                goto bad_message;

        _net("Rx RxRPC %s ep=%x call=%x:%x",
             sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
             sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);

        if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
            !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
                _proto("Rx Bad Packet Type %u", sp->hdr.type);
                goto bad_message;
        }

        if (sp->hdr.type == RXRPC_PACKET_TYPE_VERSION) {
                rxrpc_post_packet_to_local(local, skb);
                goto out;
        }

        if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
            (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
                goto bad_message;

        rcu_read_lock();

        conn = rxrpc_find_connection_rcu(local, skb);
        if (!conn) {
                skb->priority = 0;
                goto cant_route_call;
        }

        /* Note the serial number skew here */
        skew = (int)sp->hdr.serial - (int)conn->hi_serial;
        if (skew >= 0) {
                if (skew > 0)
                        conn->hi_serial = sp->hdr.serial;
                skb->priority = 0;
        } else {
                skew = -skew;
                skb->priority = min(skew, 65535);
        }
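
        /* Editor's note: the computed skew (how far this packet's serial lags
         * behind the highest serial seen so far on the connection) is stashed
         * in skb->priority, capped at 65535, so that it is available as the
         * skew argument when ACKs are proposed for this packet later on.
         */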

        if (sp->hdr.callNumber == 0) {
                /* Connection-level packet */
                _debug("CONN %p {%d}", conn, conn->debug_id);
                rxrpc_post_packet_to_conn(conn, skb);
                goto out_unlock;
        } else {
                /* Call-bound packets are routed by connection channel. */
                unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
                struct rxrpc_channel *chan = &conn->channels[channel];
                struct rxrpc_call *call;

                /* Ignore really old calls */
                if (sp->hdr.callNumber < chan->last_call)
                        goto discard_unlock;

                if (sp->hdr.callNumber == chan->last_call) {
                        /* For the previous service call, if completed
                         * successfully, we discard all further packets.
                         */
                        if (rxrpc_conn_is_service(conn) &&
                            (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
                             sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
                                goto discard_unlock;

                        /* But otherwise we need to retransmit the final packet
                         * from data cached in the connection record.
                         */
                        rxrpc_post_packet_to_conn(conn, skb);
                        goto out_unlock;
                }

                call = rcu_dereference(chan->call);
                if (!call || atomic_read(&call->usage) == 0)
                        goto cant_route_call;

                rxrpc_see_call(call);
                rxrpc_post_packet_to_call(call, skb);
                goto out_unlock;
        }

discard_unlock:
        rxrpc_free_skb(skb);
out_unlock:
        rcu_read_unlock();
out:
        return;

cant_route_call:
        rcu_read_unlock();

        _debug("can't route call");
        if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
            sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
                if (sp->hdr.seq == 1) {
                        _debug("first packet");
                        skb_queue_tail(&local->accept_queue, skb);
                        rxrpc_queue_work(&local->processor);
                        _leave(" [incoming]");
                        return;
                }
                skb->priority = RX_INVALID_OPERATION;
        } else {
                skb->priority = RX_CALL_DEAD;
        }

        if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
                _debug("reject type %d", sp->hdr.type);
                rxrpc_reject_packet(local, skb);
        } else {
                rxrpc_free_skb(skb);
        }
        _leave(" [no call]");
        return;

bad_message:
        skb->priority = RX_PROTOCOL_ERROR;
        rxrpc_reject_packet(local, skb);
        _leave(" [badmsg]");
}