/* net/dccp/ipv6.c — from thirdparty/kernel/stable.git (git.ipfire.org mirror) */
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/slab.h>
18 #include <linux/xfrm.h>
19 #include <linux/string.h>
20
21 #include <net/addrconf.h>
22 #include <net/inet_common.h>
23 #include <net/inet_hashtables.h>
24 #include <net/inet_sock.h>
25 #include <net/inet6_connection_sock.h>
26 #include <net/inet6_hashtables.h>
27 #include <net/ip6_route.h>
28 #include <net/ipv6.h>
29 #include <net/protocol.h>
30 #include <net/transp_v6.h>
31 #include <net/ip6_checksum.h>
32 #include <net/xfrm.h>
33 #include <net/secure_seq.h>
34 #include <net/sock.h>
35
36 #include "dccp.h"
37 #include "ipv6.h"
38 #include "feat.h"
39
/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

/* Forward declarations: both ops tables are defined near the bottom of this
 * file. dccp_ipv6_mapped serves IPv4-mapped sockets (DCCP-over-IPv4 via the
 * INET6 API); dccp_ipv6_af_ops is the native IPv6 path.
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
44
45 /* add pseudo-header to DCCP checksum stored in skb->csum */
46 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
47 const struct in6_addr *saddr,
48 const struct in6_addr *daddr)
49 {
50 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
51 }
52
53 static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
54 {
55 struct ipv6_pinfo *np = inet6_sk(sk);
56 struct dccp_hdr *dh = dccp_hdr(skb);
57
58 dccp_csum_outgoing(skb);
59 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
60 }
61
62 static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
63 {
64 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
65 ipv6_hdr(skb)->saddr.s6_addr32,
66 dccp_hdr(skb)->dccph_dport,
67 dccp_hdr(skb)->dccph_sport );
68
69 }
70
/*
 * ICMPv6 error handler, called from icmpv6_notify() when an ICMPv6 error
 * quoting a DCCP packet we sent arrives. @offset is the offset of the
 * quoted DCCP header within @skb; @type/@code/@info come from the ICMPv6
 * message. Returns 0 when handled, -ENOENT when no matching socket exists.
 */
static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh;
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	/* Only need dccph_dport & dccph_sport which are the first
	 * 4 bytes in dccp header.
	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
	 */
	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
	dh = (struct dccp_hdr *)(skb->data + offset);

	/* Address/port roles are swapped: the quoted packet was sent by us. */
	sk = __inet6_lookup_established(net, &dccp_hashinfo,
					&hdr->daddr, dh->dccph_dport,
					&hdr->saddr, ntohs(dh->dccph_sport),
					inet6_iif(skb), 0);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		/* Nothing to report to a timewait socket; just drop the ref. */
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = dccp_hdr_seq(dh);
	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
		/* Error refers to an embryonic request socket. */
		dccp_req_err(sk, seq);
		return 0;
	}

	bh_lock_sock(sk);
	/* If the socket is owned by user space we only count the event;
	 * processing continues below regardless.
	 */
	if (sock_owned_by_user(sk))
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	/* Drop errors quoting a sequence number outside our acknowledged
	 * window — they cannot refer to anything we actually sent.
	 */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		/* Route redirect: update the cached dst if still valid and
		 * we own the socket.
		 */
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* @info carries the new path MTU; re-route and, if the
		 * cached MTU shrank, resync the maximum packet size.
		 */
		dst = inet6_csk_update_pmtu(sk, ntohl(info));
		if (!dst)
			goto out;

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		/* Application did not enable IPV6_RECVERR (or socket is
		 * busy): record the error softly.
		 */
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}
190
191
/*
 * Transmit a DCCP-Response for @req on behalf of listening socket @sk:
 * build the flow from the request, route it, construct the Response skb
 * and send it with the listener's IPv6 options and traffic class.
 * Returns 0 on success, otherwise a negative errno / net_xmit code.
 */
static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = ireq->ir_v6_rmt_addr;
	fl6.saddr = ireq->ir_v6_loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq->ir_iif;
	fl6.fl6_dport = ireq->ir_rmt_port;
	fl6.fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));


	/* If the listener configured a routing header, route toward its
	 * final hop instead of the request's remote address.
	 */
	rcu_read_lock();
	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
	rcu_read_unlock();

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);
		struct ipv6_txoptions *opt;

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq->ir_v6_loc_addr,
							 &ireq->ir_v6_rmt_addr);
		/* Restore the real destination (fl6.daddr may have been
		 * rewritten to the routing header's final hop above).
		 */
		fl6.daddr = ireq->ir_v6_rmt_addr;
		rcu_read_lock();
		/* Per-request options take precedence over the listener's. */
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	dst_release(dst);
	return err;
}
246
247 static void dccp_v6_reqsk_destructor(struct request_sock *req)
248 {
249 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
250 kfree(inet_rsk(req)->ipv6_opt);
251 kfree_skb(inet_rsk(req)->pktopts);
252 }
253
/*
 * Send a DCCP-Reset in reply to @rxskb using the per-net control socket.
 * Used when no full socket exists for the packet (or @sk cannot answer
 * itself). Silently returns on any failure — Resets are best-effort.
 */
static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
	struct dst_entry *dst;

	/* Never answer a Reset with a Reset (avoids reset storms). */
	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	/* Reply flow: source/destination of the received packet, swapped. */
	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = rxip6h->saddr;
	fl6.saddr = rxip6h->daddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}
299
/* Request-socket operations for DCCPv6 connection establishment. */
static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
	.syn_ack_timeout = dccp_syn_ack_timeout,
};
309
/*
 * Handle an incoming DCCP-Request on listening socket @sk: validate it,
 * allocate and initialise a request socket, send a DCCP-Response and
 * enter the request into the listener's queue. Returns 0 when the
 * packet was consumed or discarded, -1 on the failure paths (which also
 * bump DCCP_MIB_ATTEMPTFAILS).
 */
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	/* IPv4 packets on a v6 listener take the v4 path. */
	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk))
		goto drop;

	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	ireq->ireq_family = AF_INET6;
	ireq->ir_mark = inet_request_mark(sk, skb);

	/* Keep the Request skb alive if the listener asked for any of the
	 * IPV6_PKTOPTIONS-style ancillary data it carries.
	 */
	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_gsr	   = dreq->dreq_isr;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_gss     = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	reqsk_put(req);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}
398
/*
 * Create the child socket once the handshake for @req completes. Two
 * paths: the IPv4-mapped case delegates to the v4 code and then patches
 * the child to the v6-mapped ops; the native IPv6 case routes, creates
 * the child, copies/resets the IPv6 per-socket state and inserts the
 * child into the established hash. Returns the new socket or NULL;
 * *own_req tells the caller whether we performed the ehash insert.
 */
static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst,
					      struct request_sock *req_unhash,
					      bool *own_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
						  req_unhash, own_req);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		/* The child talks IPv4 on the wire: use the mapped ops. */
		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		/* Pointers copied by the memcpy above must not be shared
		 * with the listener — reset them on the child.
		 */
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}


	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		struct flowi6 fl6;

		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
		if (!dst)
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
	newnp->saddr		= ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if	= ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* As in the mapped path: unshare the pointers copied by memcpy. */
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;
	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping reference count would be much more clever, but we make
	 * one more one thing there: reattach optmem to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		/* NOTE(review): rcu_dereference() here relies on the caller
		 * holding the listener lock — confirm against tcp_ipv6.c. */
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	/* Clone pktoptions received with SYN, if we own the req */
	if (*own_req && ireq->pktopts) {
		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
559
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Per-socket receive path: dispatch a packet to the established fast
 * path or the generic state machine, and latch IPV6_PKTOPTIONS data.
 * Always returns 0; a failed state transition answers with a Reset.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		/* Clone so the option data survives after @skb is consumed. */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;

/* Handling IPV6_PKTOPTIONS skb the similar
 * way it's done for net/ipv6/tcp_ipv6.c
 */
ipv6_pktoptions:
	if (!((1 << sk->sk_state) & (DCCPF_CLOSED | DCCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb,
				      &DCCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			memmove(IP6CB(opt_skb),
				&DCCP_SKB_CB(opt_skb)->header.h6,
				sizeof(struct inet6_skb_parm));
			/* Latch the new skb; xchg() returns the previous
			 * one, which is freed below.
			 */
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
684
/*
 * Main receive entry point for DCCPv6 (registered via dccp_v6_protocol).
 * Validates the packet, looks up the owning socket — including request
 * and timewait minisockets — and hands the packet over, or generates a
 * Reset when no connection exists.
 */
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	bool refcounted;
	struct sock *sk;
	int min_cov;

	/*  Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

lookup:
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
			        dh->dccph_sport, dh->dccph_dport,
				inet6_iif(skb), 0, &refcounted);
	if (!sk) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		/* The listener may have closed/changed under us; drop the
		 * stale request and redo the lookup.
		 */
		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = dccp_check_req(sk, skb, req);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			/* Packet is handled by the listener itself. */
			reqsk_put(req);
		} else if (dccp_child_process(sk, nsk, skb)) {
			dccp_v6_ctl_send_reset(sk, skb);
			goto discard_and_relse;
		} else {
			/* Child consumed the packet. */
			sock_put(sk);
			return 0;
		}
	}
	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov  &&  (min_cov == 0 || dh->dccph_cscov < min_cov))  {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
				refcounted) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	if (refcounted)
		sock_put(sk);
	goto discard_it;
}
805
/*
 * Active open: connect a DCCPv6 socket to @uaddr. Handles flow labels,
 * link-local scope ids and IPv4-mapped destinations (the latter are
 * handed to dccp_v4_connect() with the socket switched to the mapped
 * ops, rolled back on failure). Returns 0 or a negative errno.
 */
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		/* IPV6_FLOWINFO_SEND is enabled: validate the requested
		 * flow label before using it.
		 */
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch to the v6-mapped ops before the v4 connect ... */
		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			/* ... and roll the switch back on failure. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;
		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		/* Socket was not bound: adopt the source address that
		 * routing selected.
		 */
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;

	inet->inet_dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      sk->sk_v6_daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
964
/* Native DCCP-over-IPv6 connection-socket operations. */
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
981
/*
 * DCCP over IPv4 via INET6 API
 *
 * Used for IPv4-mapped IPv6 sockets: transmit/receive go through the
 * IPv4 stack while the socket keeps the IPv6 sockaddr/sockopt surface.
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1001
/* NOTE: A lot of things set to zero explicitly by call to
 *	 sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	/* Shared one-shot flag: dccp_init_sock() performs the control-sock
	 * initialisation only the first time a DCCPv6 socket is created.
	 */
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}
1018
/* Socket teardown: DCCP-specific state first, then generic IPv6 state. */
static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1024
/* Timewait minisockets only need the correct object size; no hooks. */
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};
1028
/* Transport-protocol descriptor for DCCPv6 sockets. */
static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = inet6_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.slab_flags	   = SLAB_TYPESAFE_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
1060
/* IPv6 layer-4 protocol hooks registered for IPPROTO_DCCP. */
static const struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
1066
/* Userspace-facing socket operations for PF_INET6/SOCK_DCCP sockets. */
static const struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
1091
/* Registers SOCK_DCCP/IPPROTO_DCCP with the INET6 socket layer. */
static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};
1099
1100 static int __net_init dccp_v6_init_net(struct net *net)
1101 {
1102 if (dccp_hashinfo.bhash == NULL)
1103 return -ESOCKTNOSUPPORT;
1104
1105 return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1106 SOCK_DCCP, IPPROTO_DCCP, net);
1107 }
1108
/* Per-namespace teardown: destroy the v6 control socket. */
static void __net_exit dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}
1113
/* Batch exit: purge any remaining IPv6 timewait sockets. */
static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&dccp_hashinfo, AF_INET6);
}
1118
/* Per-network-namespace lifecycle hooks. */
static struct pernet_operations dccp_v6_ops = {
	.init   = dccp_v6_init_net,
	.exit   = dccp_v6_exit_net,
	.exit_batch = dccp_v6_exit_batch,
};
1124
/*
 * Module init: register proto, socket interface, pernet ops and the
 * IPv6 protocol handler. The error labels cascade: a later failure
 * falls through the earlier labels to undo everything registered so
 * far (note out_unregister_proto falls into out_destroy_ctl_sock).
 */
static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err)
		goto out;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err)
		goto out_destroy_ctl_sock;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err)
		goto out_unregister_proto;

out:
	return err;
out_unregister_proto:
	unregister_pernet_subsys(&dccp_v6_ops);
out_destroy_ctl_sock:
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
	goto out;
}
1151
/* Module unload: tear down in the reverse order of dccp_v6_init(). */
static void __exit dccp_v6_exit(void)
{
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}
1159
module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");