// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>

#include "xfrm_inout.h"
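
/* Deferred reinjection machinery: packets queued via
 * xfrm_trans_queue_net() are replayed from a per-CPU tasklet, which
 * keeps nested IPsec processing from recursing on the receive path
 * (see the bottom of this file).
 */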
struct xfrm_trans_tasklet {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;
};

struct xfrm_trans_cb {
	union {
		struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm h6;
#endif
	} header;
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct net *net;
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
	int err = 0;

	spin_lock_bh(&xfrm_input_afinfo_lock);
	if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_input_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
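
/* Look up the input afinfo for @family.  On success the RCU read lock
 * is left held; the caller must drop it with rcu_read_unlock() once it
 * is done with the returned pointer (see xfrm_rcv_cb() below).
 */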
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo)))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
		       int err)
{
	int ret;
	const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);

	if (!afinfo)
		return -EAFNOSUPPORT;

	ret = afinfo->callback(skb, protocol, err);
	rcu_read_unlock();

	return ret;
}
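
/* Ensure @skb carries a sec_path extension.  An existing secpath is
 * reused (skb_ext_add() COWs it if it was shared); a freshly allocated
 * one is returned zeroed and empty.
 */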
struct sec_path *secpath_set(struct sk_buff *skb)
{
	struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return NULL;

	if (tmp) /* reused existing one (was COW'd if needed) */
		return sp;

	/* allocated new secpath */
	memset(sp->ovec, 0, sizeof(sp->ovec));
	sp->olen = 0;
	sp->len = 0;

	return sp;
}
EXPORT_SYMBOL(secpath_set);

/* Fetch spi and seq from ipsec header.  Returns 0 on success, -EINVAL
 * if the header cannot be pulled, and 1 for protocols that carry no
 * SPI.  For IPcomp the 16-bit CPI is widened into a 32-bit SPI and the
 * sequence number is set to zero.
 */
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);
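
/* Undo IPv4 BEET-mode encapsulation: strip the optional pseudo header
 * (IPPROTO_BEETPH, which makes room for IP options) and rebuild the
 * IPv4 header from the state's selector addresses.
 */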
static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
	struct iphdr *inner_iph = ipip_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
		IP6_ECN_set_ce(skb, inner_iph);
}

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
		goto out;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (err)
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
			       ipipv6_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip6_ecn_decapsulate(skb);

	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	if (skb->mac_len)
		eth_hdr(skb)->h_proto = skb->protocol;

	err = 0;

out:
	return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ip6h->daddr = x->sel.daddr.in6;
	ip6h->saddr = x->sel.saddr.in6;
	err = 0;
out:
	return err;
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is. skb->data shall point to the start of the
 * payload.
 */
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
			     const struct xfrm_mode *inner_mode,
			     struct sk_buff *skb)
{
	switch (inner_mode->encap) {
	case XFRM_MODE_BEET:
		if (inner_mode->family == AF_INET)
			return xfrm4_remove_beet_encap(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_remove_beet_encap(x, skb);
		break;
	case XFRM_MODE_TUNNEL:
		if (inner_mode->family == AF_INET)
			return xfrm4_remove_tunnel_encap(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_remove_tunnel_encap(x, skb);
		break;
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}
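
/* Shared BEET/tunnel input preparation: run the outer family's
 * extract_input() hook, set skb->protocol from the inner family and
 * strip the encapsulation header.
 */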
static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode = &x->inner_mode;
	const struct xfrm_state_afinfo *afinfo;
	int err = -EAFNOSUPPORT;

	rcu_read_lock();
	afinfo = xfrm_state_afinfo_get_rcu(x->outer_mode.family);
	if (likely(afinfo))
		err = afinfo->extract_input(x, skb);
	rcu_read_unlock();

	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (!inner_mode)
			return -EAFNOSUPPORT;
	}

	switch (inner_mode->family) {
	case AF_INET:
		skb->protocol = htons(ETH_P_IP);
		break;
	case AF_INET6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return xfrm_inner_mode_encap_remove(x, inner_mode, skb);
}

/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is. skb->data shall point to the start of the payload.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		skb->network_header = skb->transport_header;
	}
	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
					   sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_mode_input(struct xfrm_state *x,
				 const struct xfrm_mode *inner_mode,
				 struct sk_buff *skb)
{
	switch (inner_mode->encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		return xfrm_prepare_input(x, skb);
	case XFRM_MODE_TRANSPORT:
		if (inner_mode->family == AF_INET)
			return xfrm4_transport_input(x, skb);
		if (inner_mode->family == AF_INET6)
			return xfrm6_transport_input(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}
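
/* Main IPsec receive path.  @encap_type >= 0 is a normal receive, where
 * a non-zero value names the NAT-T encapsulation type the outermost
 * state must use; -1 indicates asynchronous resumption after crypto
 * completion; values < -1 indicate a call from the GRO codepath.  The
 * loop below peels one layer of protection per iteration until a tunnel
 * decapsulation or a parse failure ends the chain.
 */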
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	const struct xfrm_state_afinfo *afinfo;
	struct net *net = dev_net(skb->dev);
	const struct xfrm_mode *inner_mode;
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (encap_type < 0) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->outer_mode.family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		sp = skb_sec_path(skb);

		if (sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr < 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = &x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
				goto drop;
			}
		}

		if (xfrm_inner_mode_input(x, inner_mode, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address. However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode.family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset_ct(skb);

	if (decaps) {
		sp = skb_sec_path(skb);
		if (sp)
			sp->olen = 0;
		skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = -EAFNOSUPPORT;
		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(x->inner_mode.family);
		if (likely(afinfo))
			err = afinfo->transport_finish(skb, xfrm_gro || async);
		rcu_read_unlock();
		if (xfrm_gro) {
			sp = skb_sec_path(skb);
			if (sp)
				sp->olen = 0;
			skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);
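
/* Tasklet handler: splice out the per-CPU queue and run each packet's
 * recorded finish callback, now outside the context that queued it.
 */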
static void xfrm_trans_reinject(unsigned long data)
{
	struct xfrm_trans_tasklet *trans = (void *)data;
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&trans->queue, &queue);

	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
					       NULL, skb);
}
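
/* Queue @skb for deferred processing on this CPU's reinjection tasklet.
 * Returns -ENOBUFS, without queueing, once the per-CPU backlog reaches
 * netdev_max_backlog; otherwise @finish runs later from tasklet context
 * with @net and the queued skb.
 */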
int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
		return -ENOBUFS;

	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	XFRM_TRANS_SKB_CB(skb)->net = net;
	__skb_queue_tail(&trans->queue, skb);
	tasklet_schedule(&trans->tasklet);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);
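
/* A minimal usage sketch (not taken from this file; the handler name is
 * hypothetical).  A receive path that must not run its final processing
 * inline can defer it:
 *
 *	static int my_xfrm_finish(struct net *net, struct sock *sk,
 *				  struct sk_buff *skb)
 *	{
 *		return netif_rx(skb);
 *	}
 *
 *	err = xfrm_trans_queue_net(net, skb, my_xfrm_finish);
 *	if (err)
 *		kfree_skb(skb);
 *
 * On success the tasklet queue owns the skb and my_xfrm_finish() runs
 * later in softirq context.
 */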

int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);
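
/* Boot-time setup: a dummy NAPI device backing the gro_cells through
 * which decapsulated packets re-enter the stack, plus one reinjection
 * tasklet per possible CPU.
 */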
void __init xfrm_input_init(void)
{
	int err;
	int i;

	init_dummy_netdev(&xfrm_napi_dev);
	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
	if (err)
		gro_cells.cells = NULL;

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		__skb_queue_head_init(&trans->queue);
		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
			     (unsigned long)trans);
	}
}