net/ipv6/tcp_ipv6.c
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
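/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the IPV6_V6ONLY option mentioned in the header above is controlled from
 * userspace.  When it is clear (the usual default), an AF_INET6 socket can
 * also carry IPv4 traffic via v4-mapped addresses, which is how IPv4 and
 * IPv6 sockets share a single port.  A hypothetical helper:
 */
#if 0	/* example only */
#include <netinet/in.h>
#include <sys/socket.h>

static void make_v6only(int fd)
{
	int on = 1;

	/* refuse v4-mapped traffic; the socket then serves IPv6 only */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
}
#endif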
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
4aa956d8 42#include <linux/uaccess.h>
1da177e4
LT
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
3d58b5fa 62#include <net/inet_common.h>
6e5714ea 63#include <net/secure_seq.h>
076bb0c8 64#include <net/busy_poll.h>
1da177e4 65
1da177e4
LT
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
cf80e0e4 69#include <crypto/hash.h>
cfb6eeb4
YH
70#include <linux/scatterlist.h>
71
c24b14c4
SL
72#include <trace/events/tcp.h>
73
a00e7444
ED
74static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
75static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 76 struct request_sock *req);
1da177e4
LT
77
78static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 79
3b401a81
SH
80static const struct inet_connection_sock_af_ops ipv6_mapped;
81static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 82#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
83static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972 85#else
51723935 86static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 87 const struct in6_addr *addr)
9501f972
YH
88{
89 return NULL;
90}
a928630a 91#endif
1da177e4 92
fae6ef87
NC
93static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
94{
95 struct dst_entry *dst = skb_dst(skb);
fae6ef87 96
5037e9ef 97 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
98 const struct rt6_info *rt = (const struct rt6_info *)dst;
99
ca777eff
ED
100 sk->sk_rx_dst = dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
b197df4f 102 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
ca777eff 103 }
fae6ef87
NC
104}
105
84b114b9 106static u32 tcp_v6_init_seq(const struct sk_buff *skb)
1da177e4 107{
84b114b9
ED
108 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
109 ipv6_hdr(skb)->saddr.s6_addr32,
110 tcp_hdr(skb)->dest,
111 tcp_hdr(skb)->source);
112}
113
5d2ed052 114static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
84b114b9 115{
5d2ed052 116 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
84b114b9 117 ipv6_hdr(skb)->saddr.s6_addr32);
1da177e4
LT
118}
119
d74bad4e
AI
120static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
121 int addr_len)
122{
123 /* This check is replicated from tcp_v6_connect() and intended to
124 * prevent BPF program called below from accessing bytes that are out
125 * of the bound specified by user in addr_len.
126 */
127 if (addr_len < SIN6_LEN_RFC2133)
128 return -EINVAL;
129
130 sock_owned_by_me(sk);
131
132 return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
133}
134
1ab1457c 135static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
136 int addr_len)
137{
138 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 139 struct inet_sock *inet = inet_sk(sk);
d83d8461 140 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
141 struct ipv6_pinfo *np = inet6_sk(sk);
142 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 143 struct in6_addr *saddr = NULL, *final_p, final;
45f6fad8 144 struct ipv6_txoptions *opt;
4c9483b2 145 struct flowi6 fl6;
1da177e4
LT
146 struct dst_entry *dst;
147 int addr_type;
148 int err;
1946e672 149 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
1da177e4 150
1ab1457c 151 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
152 return -EINVAL;
153
1ab1457c 154 if (usin->sin6_family != AF_INET6)
a02cec21 155 return -EAFNOSUPPORT;
1da177e4 156
4c9483b2 157 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
158
159 if (np->sndflow) {
4c9483b2
DM
160 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
161 IP6_ECN_flow_init(fl6.flowlabel);
162 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 163 struct ip6_flowlabel *flowlabel;
4c9483b2 164 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
63159f29 165 if (!flowlabel)
1da177e4 166 return -EINVAL;
1da177e4
LT
167 fl6_sock_release(flowlabel);
168 }
169 }
170
171 /*
1ab1457c
YH
172 * connect() to INADDR_ANY means loopback (BSD'ism).
173 */
174
052d2369
JL
175 if (ipv6_addr_any(&usin->sin6_addr)) {
176 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
177 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
178 &usin->sin6_addr);
179 else
180 usin->sin6_addr = in6addr_loopback;
181 }
1da177e4
LT
182
183 addr_type = ipv6_addr_type(&usin->sin6_addr);
184
4c99aa40 185 if (addr_type & IPV6_ADDR_MULTICAST)
1da177e4
LT
186 return -ENETUNREACH;
187
188 if (addr_type&IPV6_ADDR_LINKLOCAL) {
189 if (addr_len >= sizeof(struct sockaddr_in6) &&
190 usin->sin6_scope_id) {
191 /* If interface is set while binding, indices
192 * must coincide.
193 */
54dc3e33 194 if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
1da177e4
LT
195 return -EINVAL;
196
197 sk->sk_bound_dev_if = usin->sin6_scope_id;
198 }
199
200 /* Connect to link-local address requires an interface */
201 if (!sk->sk_bound_dev_if)
202 return -EINVAL;
203 }
204
205 if (tp->rx_opt.ts_recent_stamp &&
efe4208f 206 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
1da177e4
LT
207 tp->rx_opt.ts_recent = 0;
208 tp->rx_opt.ts_recent_stamp = 0;
209 tp->write_seq = 0;
210 }
211
efe4208f 212 sk->sk_v6_daddr = usin->sin6_addr;
4c9483b2 213 np->flow_label = fl6.flowlabel;
1da177e4
LT
214
215 /*
216 * TCP over IPv4
217 */
218
052d2369 219 if (addr_type & IPV6_ADDR_MAPPED) {
d83d8461 220 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
221 struct sockaddr_in sin;
222
223 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
224
225 if (__ipv6_only_sock(sk))
226 return -ENETUNREACH;
227
228 sin.sin_family = AF_INET;
229 sin.sin_port = usin->sin6_port;
230 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
231
d83d8461 232 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 233 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
234#ifdef CONFIG_TCP_MD5SIG
235 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
236#endif
1da177e4
LT
237
238 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
239
240 if (err) {
d83d8461
ACM
241 icsk->icsk_ext_hdr_len = exthdrlen;
242 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 243 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
244#ifdef CONFIG_TCP_MD5SIG
245 tp->af_specific = &tcp_sock_ipv6_specific;
246#endif
1da177e4 247 goto failure;
1da177e4 248 }
d1e559d0 249 np->saddr = sk->sk_v6_rcv_saddr;
1da177e4
LT
250
251 return err;
252 }
253
efe4208f
ED
254 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
255 saddr = &sk->sk_v6_rcv_saddr;
1da177e4 256
4c9483b2 257 fl6.flowi6_proto = IPPROTO_TCP;
efe4208f 258 fl6.daddr = sk->sk_v6_daddr;
4e3fd7a0 259 fl6.saddr = saddr ? *saddr : np->saddr;
4c9483b2
DM
260 fl6.flowi6_oif = sk->sk_bound_dev_if;
261 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
262 fl6.fl6_dport = usin->sin6_port;
263 fl6.fl6_sport = inet->inet_sport;
e2d118a1 264 fl6.flowi6_uid = sk->sk_uid;
1da177e4 265
1e1d04e6 266 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
45f6fad8 267 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 268
4c9483b2 269 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 270
0e0d44ab 271 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
68d0c6d3
DM
272 if (IS_ERR(dst)) {
273 err = PTR_ERR(dst);
1da177e4 274 goto failure;
14e50e57 275 }
1da177e4 276
63159f29 277 if (!saddr) {
4c9483b2 278 saddr = &fl6.saddr;
efe4208f 279 sk->sk_v6_rcv_saddr = *saddr;
1da177e4
LT
280 }
281
282 /* set the source address */
4e3fd7a0 283 np->saddr = *saddr;
c720c7e8 284 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 285
f83ef8c0 286 sk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 287 ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 288
d83d8461 289 icsk->icsk_ext_hdr_len = 0;
45f6fad8
ED
290 if (opt)
291 icsk->icsk_ext_hdr_len = opt->opt_flen +
292 opt->opt_nflen;
1da177e4
LT
293
294 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
295
c720c7e8 296 inet->inet_dport = usin->sin6_port;
1da177e4
LT
297
298 tcp_set_state(sk, TCP_SYN_SENT);
1946e672 299 err = inet6_hash_connect(tcp_death_row, sk);
1da177e4
LT
300 if (err)
301 goto late_failure;
302
877d1f62 303 sk_set_txhash(sk);
9e7ceb06 304
00355fa5 305 if (likely(!tp->repair)) {
00355fa5 306 if (!tp->write_seq)
84b114b9
ED
307 tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
308 sk->sk_v6_daddr.s6_addr32,
309 inet->inet_sport,
310 inet->inet_dport);
5d2ed052
ED
311 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
312 np->saddr.s6_addr32,
84b114b9 313 sk->sk_v6_daddr.s6_addr32);
00355fa5 314 }
1da177e4 315
19f6d3f3
WW
316 if (tcp_fastopen_defer_connect(sk, &err))
317 return err;
318 if (err)
319 goto late_failure;
320
1da177e4
LT
321 err = tcp_connect(sk);
322 if (err)
323 goto late_failure;
324
325 return 0;
326
327late_failure:
328 tcp_set_state(sk, TCP_CLOSE);
1da177e4 329failure:
c720c7e8 330 inet->inet_dport = 0;
1da177e4
LT
331 sk->sk_route_caps = 0;
332 return err;
333}
334
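/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal userspace client driving the connect path above.  An AF_INET6
 * socket reaches an IPv4 peer through a v4-mapped address (the
 * IPV6_ADDR_MAPPED branch in tcp_v6_connect()), and a link-local peer
 * needs a scope id, as enforced above.  The addresses, port and interface
 * name are placeholders; error handling is trimmed.
 */
#if 0	/* example only */
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_v6_example(void)
{
	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
				   .sin6_port = htons(80) };
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	/* IPv4 peer via a v4-mapped address (fails if IPV6_V6ONLY is set) */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);

	/* For a link-local peer a scope id is mandatory instead:
	 *   inet_pton(AF_INET6, "fe80::1", &sa.sin6_addr);
	 *   sa.sin6_scope_id = if_nametoindex("eth0");   (see <net/if.h>)
	 */
	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif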
563d34d0
ED
335static void tcp_v6_mtu_reduced(struct sock *sk)
336{
337 struct dst_entry *dst;
338
339 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
340 return;
341
342 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
343 if (!dst)
344 return;
345
346 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
347 tcp_sync_mss(sk, dst_mtu(dst));
348 tcp_simple_retransmit(sk);
349 }
350}
351
1da177e4 352static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 353 u8 type, u8 code, int offset, __be32 info)
1da177e4 354{
4c99aa40 355 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
505cbfc5 356 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
2215089b
ED
357 struct net *net = dev_net(skb->dev);
358 struct request_sock *fastopen;
1da177e4 359 struct ipv6_pinfo *np;
1ab1457c 360 struct tcp_sock *tp;
0a672f74 361 __u32 seq, snd_una;
2215089b 362 struct sock *sk;
9cf74903 363 bool fatal;
2215089b 364 int err;
1da177e4 365
2215089b
ED
366 sk = __inet6_lookup_established(net, &tcp_hashinfo,
367 &hdr->daddr, th->dest,
368 &hdr->saddr, ntohs(th->source),
4297a0ef 369 skb->dev->ifindex, inet6_sdif(skb));
1da177e4 370
2215089b 371 if (!sk) {
a16292a0
ED
372 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
373 ICMP6_MIB_INERRORS);
1da177e4
LT
374 return;
375 }
376
377 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 378 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
379 return;
380 }
2215089b 381 seq = ntohl(th->seq);
9cf74903 382 fatal = icmpv6_err_convert(type, code, &err);
2215089b 383 if (sk->sk_state == TCP_NEW_SYN_RECV)
9cf74903 384 return tcp_req_err(sk, seq, fatal);
1da177e4
LT
385
386 bh_lock_sock(sk);
563d34d0 387 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
02a1d6e7 388 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
389
390 if (sk->sk_state == TCP_CLOSE)
391 goto out;
392
e802af9c 393 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 394 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
395 goto out;
396 }
397
1da177e4 398 tp = tcp_sk(sk);
0a672f74
YC
 399 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
400 fastopen = tp->fastopen_rsk;
401 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 402 if (sk->sk_state != TCP_LISTEN &&
0a672f74 403 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 404 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
405 goto out;
406 }
407
408 np = inet6_sk(sk);
409
ec18d9a2 410 if (type == NDISC_REDIRECT) {
45caeaa5
JM
411 if (!sock_owned_by_user(sk)) {
412 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
ec18d9a2 413
45caeaa5
JM
414 if (dst)
415 dst->ops->redirect(dst, sk, skb);
416 }
50a75a89 417 goto out;
ec18d9a2
DM
418 }
419
1da177e4 420 if (type == ICMPV6_PKT_TOOBIG) {
0d4f0608
ED
421 /* We are not interested in TCP_LISTEN and open_requests
 422 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
423 * they should go through unfragmented).
424 */
425 if (sk->sk_state == TCP_LISTEN)
426 goto out;
427
93b36cf3
HFS
428 if (!ip6_sk_accept_pmtu(sk))
429 goto out;
430
563d34d0
ED
431 tp->mtu_info = ntohl(info);
432 if (!sock_owned_by_user(sk))
433 tcp_v6_mtu_reduced(sk);
d013ef2a 434 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
7aa5470c 435 &sk->sk_tsq_flags))
d013ef2a 436 sock_hold(sk);
1da177e4
LT
437 goto out;
438 }
439
1da177e4 440
60236fdd 441 /* Might be for a request_sock */
1da177e4 442 switch (sk->sk_state) {
1da177e4 443 case TCP_SYN_SENT:
0a672f74
YC
444 case TCP_SYN_RECV:
445 /* Only in fast or simultaneous open. If a fast open socket is
 446 * already accepted, it is treated as a connected one below.
447 */
63159f29 448 if (fastopen && !fastopen->sk)
0a672f74
YC
449 break;
450
1da177e4 451 if (!sock_owned_by_user(sk)) {
1da177e4
LT
452 sk->sk_err = err;
453 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
454
455 tcp_done(sk);
456 } else
457 sk->sk_err_soft = err;
458 goto out;
459 }
460
461 if (!sock_owned_by_user(sk) && np->recverr) {
462 sk->sk_err = err;
463 sk->sk_error_report(sk);
464 } else
465 sk->sk_err_soft = err;
466
467out:
468 bh_unlock_sock(sk);
469 sock_put(sk);
470}
471
472
0f935dbe 473static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 474 struct flowi *fl,
3840a06e 475 struct request_sock *req,
ca6fb065 476 struct tcp_fastopen_cookie *foc,
b3d05147 477 enum tcp_synack_type synack_type)
1da177e4 478{
634fb979 479 struct inet_request_sock *ireq = inet_rsk(req);
1da177e4 480 struct ipv6_pinfo *np = inet6_sk(sk);
56ac42bc 481 struct ipv6_txoptions *opt;
d6274bd8 482 struct flowi6 *fl6 = &fl->u.ip6;
4c99aa40 483 struct sk_buff *skb;
9494218f 484 int err = -ENOMEM;
1da177e4 485
9f10d3f6 486 /* First, grab a route. */
f76b33c3
ED
487 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
488 IPPROTO_TCP)) == NULL)
fd80eb94 489 goto done;
9494218f 490
b3d05147 491 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
9494218f 492
1da177e4 493 if (skb) {
634fb979
ED
494 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
495 &ireq->ir_v6_rmt_addr);
1da177e4 496
634fb979 497 fl6->daddr = ireq->ir_v6_rmt_addr;
53b24b8f 498 if (np->repflow && ireq->pktopts)
df3687ff
FF
499 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
500
3e4006f0 501 rcu_read_lock();
56ac42bc
HD
502 opt = ireq->ipv6_opt;
503 if (!opt)
504 opt = rcu_dereference(np->opt);
92e55f41 505 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
3e4006f0 506 rcu_read_unlock();
b9df3cb8 507 err = net_xmit_eval(err);
1da177e4
LT
508 }
509
510done:
1da177e4
LT
511 return err;
512}
513
72659ecc 514
60236fdd 515static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 516{
56ac42bc 517 kfree(inet_rsk(req)->ipv6_opt);
634fb979 518 kfree_skb(inet_rsk(req)->pktopts);
1da177e4
LT
519}
520
cfb6eeb4 521#ifdef CONFIG_TCP_MD5SIG
b83e3deb 522static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 523 const struct in6_addr *addr)
cfb6eeb4 524{
a915da9b 525 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
cfb6eeb4
YH
526}
527
b83e3deb 528static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
fd3a154a 529 const struct sock *addr_sk)
cfb6eeb4 530{
efe4208f 531 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
cfb6eeb4
YH
532}
533
8917a777
ID
534static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
535 char __user *optval, int optlen)
cfb6eeb4
YH
536{
537 struct tcp_md5sig cmd;
538 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
8917a777 539 u8 prefixlen;
cfb6eeb4
YH
540
541 if (optlen < sizeof(cmd))
542 return -EINVAL;
543
544 if (copy_from_user(&cmd, optval, sizeof(cmd)))
545 return -EFAULT;
546
547 if (sin6->sin6_family != AF_INET6)
548 return -EINVAL;
549
8917a777
ID
550 if (optname == TCP_MD5SIG_EXT &&
551 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
552 prefixlen = cmd.tcpm_prefixlen;
553 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
554 prefixlen > 32))
555 return -EINVAL;
556 } else {
557 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
558 }
559
cfb6eeb4 560 if (!cmd.tcpm_keylen) {
e773e4fa 561 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
a915da9b 562 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
8917a777 563 AF_INET, prefixlen);
a915da9b 564 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
8917a777 565 AF_INET6, prefixlen);
cfb6eeb4
YH
566 }
567
568 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
569 return -EINVAL;
570
a915da9b
ED
571 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
572 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
8917a777 573 AF_INET, prefixlen, cmd.tcpm_key,
6797318e 574 cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4 575
a915da9b 576 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
8917a777
ID
577 AF_INET6, prefixlen, cmd.tcpm_key,
578 cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
579}
580
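/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how userspace hands keys to tcp_v6_parse_md5_keys() above.  TCP_MD5SIG
 * installs a key for one peer; TCP_MD5SIG_EXT with TCP_MD5SIG_FLAG_PREFIX
 * and tcpm_prefixlen additionally scopes it to an address prefix.  The
 * peer address and key below are placeholders.
 */
#if 0	/* example only */
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int install_md5_key(int fd, const char *peer, const char *key)
{
	struct tcp_md5sig md5;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin6->sin6_family = AF_INET6;
	inet_pton(AF_INET6, peer, &sin6->sin6_addr);
	md5.tcpm_keylen = strlen(key);	/* must be <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif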
19689e38
ED
581static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
582 const struct in6_addr *daddr,
583 const struct in6_addr *saddr,
584 const struct tcphdr *th, int nbytes)
cfb6eeb4 585{
cfb6eeb4 586 struct tcp6_pseudohdr *bp;
49a72dfb 587 struct scatterlist sg;
19689e38 588 struct tcphdr *_th;
8d26d76d 589
19689e38 590 bp = hp->scratch;
cfb6eeb4 591 /* 1. TCP pseudo-header (RFC2460) */
4e3fd7a0
AD
592 bp->saddr = *saddr;
593 bp->daddr = *daddr;
49a72dfb 594 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 595 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 596
19689e38
ED
597 _th = (struct tcphdr *)(bp + 1);
598 memcpy(_th, th, sizeof(*th));
599 _th->check = 0;
600
601 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
602 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
603 sizeof(*bp) + sizeof(*th));
cf80e0e4 604 return crypto_ahash_update(hp->md5_req);
49a72dfb 605}
c7da57a1 606
19689e38 607static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
b71d1d42 608 const struct in6_addr *daddr, struct in6_addr *saddr,
318cf7aa 609 const struct tcphdr *th)
49a72dfb
AL
610{
611 struct tcp_md5sig_pool *hp;
cf80e0e4 612 struct ahash_request *req;
49a72dfb
AL
613
614 hp = tcp_get_md5sig_pool();
615 if (!hp)
616 goto clear_hash_noput;
cf80e0e4 617 req = hp->md5_req;
49a72dfb 618
cf80e0e4 619 if (crypto_ahash_init(req))
49a72dfb 620 goto clear_hash;
19689e38 621 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
49a72dfb
AL
622 goto clear_hash;
623 if (tcp_md5_hash_key(hp, key))
624 goto clear_hash;
cf80e0e4
HX
625 ahash_request_set_crypt(req, NULL, md5_hash, 0);
626 if (crypto_ahash_final(req))
cfb6eeb4 627 goto clear_hash;
cfb6eeb4 628
cfb6eeb4 629 tcp_put_md5sig_pool();
cfb6eeb4 630 return 0;
49a72dfb 631
cfb6eeb4
YH
632clear_hash:
633 tcp_put_md5sig_pool();
634clear_hash_noput:
635 memset(md5_hash, 0, 16);
49a72dfb 636 return 1;
cfb6eeb4
YH
637}
638
39f8e58e
ED
639static int tcp_v6_md5_hash_skb(char *md5_hash,
640 const struct tcp_md5sig_key *key,
318cf7aa 641 const struct sock *sk,
318cf7aa 642 const struct sk_buff *skb)
cfb6eeb4 643{
b71d1d42 644 const struct in6_addr *saddr, *daddr;
49a72dfb 645 struct tcp_md5sig_pool *hp;
cf80e0e4 646 struct ahash_request *req;
318cf7aa 647 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 648
39f8e58e
ED
649 if (sk) { /* valid for establish/request sockets */
650 saddr = &sk->sk_v6_rcv_saddr;
efe4208f 651 daddr = &sk->sk_v6_daddr;
49a72dfb 652 } else {
b71d1d42 653 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
654 saddr = &ip6h->saddr;
655 daddr = &ip6h->daddr;
cfb6eeb4 656 }
49a72dfb
AL
657
658 hp = tcp_get_md5sig_pool();
659 if (!hp)
660 goto clear_hash_noput;
cf80e0e4 661 req = hp->md5_req;
49a72dfb 662
cf80e0e4 663 if (crypto_ahash_init(req))
49a72dfb
AL
664 goto clear_hash;
665
19689e38 666 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
49a72dfb
AL
667 goto clear_hash;
668 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
669 goto clear_hash;
670 if (tcp_md5_hash_key(hp, key))
671 goto clear_hash;
cf80e0e4
HX
672 ahash_request_set_crypt(req, NULL, md5_hash, 0);
673 if (crypto_ahash_final(req))
49a72dfb
AL
674 goto clear_hash;
675
676 tcp_put_md5sig_pool();
677 return 0;
678
679clear_hash:
680 tcp_put_md5sig_pool();
681clear_hash_noput:
682 memset(md5_hash, 0, 16);
683 return 1;
cfb6eeb4
YH
684}
685
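/*
 * Editor's summary of the two helpers above: the MD5 input is the IPv6
 * pseudo-header (both addresses plus protocol and length), the TCP header
 * with its checksum field zeroed, the segment payload (in
 * tcp_v6_md5_hash_skb() only), and finally the key itself, matching the
 * TCP MD5 signature option of RFC 2385.
 */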
ba8e275a
ED
686#endif
687
688static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
689 const struct sk_buff *skb)
cfb6eeb4 690{
ba8e275a 691#ifdef CONFIG_TCP_MD5SIG
cf533ea5 692 const __u8 *hash_location = NULL;
cfb6eeb4 693 struct tcp_md5sig_key *hash_expected;
b71d1d42 694 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
318cf7aa 695 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 696 int genhash;
cfb6eeb4
YH
697 u8 newhash[16];
698
699 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 700 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 701
785957d3
DM
702 /* We've parsed the options - do we have a hash? */
703 if (!hash_expected && !hash_location)
ff74e23f 704 return false;
785957d3
DM
705
706 if (hash_expected && !hash_location) {
c10d9310 707 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
ff74e23f 708 return true;
cfb6eeb4
YH
709 }
710
785957d3 711 if (!hash_expected && hash_location) {
c10d9310 712 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
ff74e23f 713 return true;
cfb6eeb4
YH
714 }
715
716 /* check the signature */
49a72dfb
AL
717 genhash = tcp_v6_md5_hash_skb(newhash,
718 hash_expected,
39f8e58e 719 NULL, skb);
49a72dfb 720
cfb6eeb4 721 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
72145a68 722 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
e87cc472
JP
723 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
724 genhash ? "failed" : "mismatch",
725 &ip6h->saddr, ntohs(th->source),
726 &ip6h->daddr, ntohs(th->dest));
ff74e23f 727 return true;
cfb6eeb4 728 }
ba8e275a 729#endif
ff74e23f 730 return false;
cfb6eeb4 731}
cfb6eeb4 732
b40cf18e
ED
733static void tcp_v6_init_req(struct request_sock *req,
734 const struct sock *sk_listener,
16bea70a
OP
735 struct sk_buff *skb)
736{
737 struct inet_request_sock *ireq = inet_rsk(req);
b40cf18e 738 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
16bea70a
OP
739
740 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
741 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
742
16bea70a 743 /* So that link locals have meaning */
b40cf18e 744 if (!sk_listener->sk_bound_dev_if &&
16bea70a 745 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
870c3151 746 ireq->ir_iif = tcp_v6_iif(skb);
16bea70a 747
04317daf 748 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
b40cf18e 749 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
a224772d 750 np->rxopt.bits.rxinfo ||
16bea70a
OP
751 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
752 np->rxopt.bits.rxohlim || np->repflow)) {
63354797 753 refcount_inc(&skb->users);
16bea70a
OP
754 ireq->pktopts = skb;
755 }
756}
757
f964629e
ED
758static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
759 struct flowi *fl,
4396e461 760 const struct request_sock *req)
d94e0417 761{
f76b33c3 762 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
d94e0417
OP
763}
764
c6aefafb 765struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 766 .family = AF_INET6,
2e6599cb 767 .obj_size = sizeof(struct tcp6_request_sock),
5db92c99 768 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
769 .send_ack = tcp_v6_reqsk_send_ack,
770 .destructor = tcp_v6_reqsk_destructor,
72659ecc 771 .send_reset = tcp_v6_send_reset,
4aa956d8 772 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
773};
774
b2e4b3de 775static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
2aec4a29
OP
776 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
777 sizeof(struct ipv6hdr),
16bea70a 778#ifdef CONFIG_TCP_MD5SIG
fd3a154a 779 .req_md5_lookup = tcp_v6_md5_lookup,
e3afe7b7 780 .calc_md5_hash = tcp_v6_md5_hash_skb,
b6332e6c 781#endif
16bea70a 782 .init_req = tcp_v6_init_req,
fb7b37a7
OP
783#ifdef CONFIG_SYN_COOKIES
784 .cookie_init_seq = cookie_v6_init_sequence,
785#endif
d94e0417 786 .route_req = tcp_v6_route_req,
84b114b9
ED
787 .init_seq = tcp_v6_init_seq,
788 .init_ts_off = tcp_v6_init_ts_off,
d6274bd8 789 .send_synack = tcp_v6_send_synack,
16bea70a 790};
cfb6eeb4 791
a00e7444 792static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae
ED
793 u32 ack, u32 win, u32 tsval, u32 tsecr,
794 int oif, struct tcp_md5sig_key *key, int rst,
5119bd16 795 u8 tclass, __be32 label)
1da177e4 796{
cf533ea5
ED
797 const struct tcphdr *th = tcp_hdr(skb);
798 struct tcphdr *t1;
1da177e4 799 struct sk_buff *buff;
4c9483b2 800 struct flowi6 fl6;
0f85feae 801 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
e5047992 802 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 803 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 804 struct dst_entry *dst;
81ada62d 805 __be32 *topt;
00483690 806 __u32 mark = 0;
1da177e4 807
ee684b6f 808 if (tsecr)
626e264d 809 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 810#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
811 if (key)
812 tot_len += TCPOLEN_MD5SIG_ALIGNED;
813#endif
814
cfb6eeb4 815 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 816 GFP_ATOMIC);
63159f29 817 if (!buff)
1ab1457c 818 return;
1da177e4 819
cfb6eeb4 820 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 821
d58ff351 822 t1 = skb_push(buff, tot_len);
6651ffc8 823 skb_reset_transport_header(buff);
1da177e4
LT
824
825 /* Swap the send and the receive. */
826 memset(t1, 0, sizeof(*t1));
827 t1->dest = th->source;
828 t1->source = th->dest;
cfb6eeb4 829 t1->doff = tot_len / 4;
626e264d
IJ
830 t1->seq = htonl(seq);
831 t1->ack_seq = htonl(ack);
832 t1->ack = !rst || !th->ack;
833 t1->rst = rst;
834 t1->window = htons(win);
1da177e4 835
81ada62d
IJ
836 topt = (__be32 *)(t1 + 1);
837
ee684b6f 838 if (tsecr) {
626e264d
IJ
839 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
840 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
ee684b6f
AV
841 *topt++ = htonl(tsval);
842 *topt++ = htonl(tsecr);
626e264d
IJ
843 }
844
cfb6eeb4
YH
845#ifdef CONFIG_TCP_MD5SIG
846 if (key) {
81ada62d
IJ
847 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
848 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
849 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
850 &ipv6_hdr(skb)->saddr,
851 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
852 }
853#endif
854
4c9483b2 855 memset(&fl6, 0, sizeof(fl6));
4e3fd7a0
AD
856 fl6.daddr = ipv6_hdr(skb)->saddr;
857 fl6.saddr = ipv6_hdr(skb)->daddr;
1d13a96c 858 fl6.flowlabel = label;
1da177e4 859
e5700aff
DM
860 buff->ip_summed = CHECKSUM_PARTIAL;
861 buff->csum = 0;
862
4c9483b2 863 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1da177e4 864
4c9483b2 865 fl6.flowi6_proto = IPPROTO_TCP;
a36dbdb2 866 if (rt6_need_strict(&fl6.daddr) && !oif)
870c3151 867 fl6.flowi6_oif = tcp_v6_iif(skb);
9b6c14d5
DA
868 else {
869 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
870 oif = skb->skb_iif;
871
872 fl6.flowi6_oif = oif;
873 }
1d2f7b2d 874
00483690
JM
875 if (sk)
876 mark = (sk->sk_state == TCP_TIME_WAIT) ?
877 inet_twsk(sk)->tw_mark : sk->sk_mark;
878 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
1958b856
DM
879 fl6.fl6_dport = t1->dest;
880 fl6.fl6_sport = t1->source;
e2d118a1 881 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
4c9483b2 882 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1da177e4 883
c20121ae
DL
 884 /* Pass a socket to ip6_dst_lookup even if it is for a RST;
 885 * the underlying function will use it to retrieve the network
 886 * namespace
887 */
0e0d44ab 888 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
68d0c6d3
DM
889 if (!IS_ERR(dst)) {
890 skb_dst_set(buff, dst);
92e55f41 891 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
c10d9310 892 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
68d0c6d3 893 if (rst)
c10d9310 894 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
68d0c6d3 895 return;
1da177e4
LT
896 }
897
898 kfree_skb(buff);
899}
900
a00e7444 901static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 902{
cf533ea5 903 const struct tcphdr *th = tcp_hdr(skb);
626e264d 904 u32 seq = 0, ack_seq = 0;
fa3e5b4e 905 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
906#ifdef CONFIG_TCP_MD5SIG
907 const __u8 *hash_location = NULL;
908 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
909 unsigned char newhash[16];
910 int genhash;
911 struct sock *sk1 = NULL;
912#endif
c24b14c4 913 int oif = 0;
1da177e4 914
626e264d 915 if (th->rst)
1da177e4
LT
916 return;
917
c3658e8d
ED
 918 /* If sk is not NULL, it means we did a successful lookup and the incoming
919 * route had to be correct. prequeue might have dropped our dst.
920 */
921 if (!sk && !ipv6_unicast_destination(skb))
626e264d 922 return;
1da177e4 923
cfb6eeb4 924#ifdef CONFIG_TCP_MD5SIG
3b24d854 925 rcu_read_lock();
658ddaaf 926 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 927 if (sk && sk_fullsock(sk)) {
e46787f0
FW
928 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
929 } else if (hash_location) {
658ddaaf
SL
930 /*
931 * active side is lost. Try to find listening socket through
932 * source port, and then find md5 key through listening socket.
 933 * we are not loosening security here:
 934 * the incoming packet is checked against the md5 hash using the found key,
 935 * and no RST is generated if the md5 hash doesn't match.
936 */
937 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
a583636a
CG
938 &tcp_hashinfo, NULL, 0,
939 &ipv6h->saddr,
5ba24953 940 th->source, &ipv6h->daddr,
4297a0ef
DA
941 ntohs(th->source), tcp_v6_iif(skb),
942 tcp_v6_sdif(skb));
658ddaaf 943 if (!sk1)
3b24d854 944 goto out;
658ddaaf 945
658ddaaf
SL
946 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
947 if (!key)
3b24d854 948 goto out;
658ddaaf 949
39f8e58e 950 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 951 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854 952 goto out;
658ddaaf 953 }
cfb6eeb4
YH
954#endif
955
626e264d
IJ
956 if (th->ack)
957 seq = ntohl(th->ack_seq);
958 else
959 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
960 (th->doff << 2);
1da177e4 961
c24b14c4
SL
962 if (sk) {
963 oif = sk->sk_bound_dev_if;
5c487bb9
SL
964 if (sk_fullsock(sk))
965 trace_tcp_send_reset(sk, skb);
c24b14c4
SL
966 }
967
0f85feae 968 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
658ddaaf
SL
969
970#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
971out:
972 rcu_read_unlock();
658ddaaf 973#endif
626e264d 974}
1da177e4 975
a00e7444 976static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae 977 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1d13a96c 978 struct tcp_md5sig_key *key, u8 tclass,
5119bd16 979 __be32 label)
626e264d 980{
0f85feae
ED
981 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
982 tclass, label);
1da177e4
LT
983}
984
985static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
986{
8feaf0c0 987 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 988 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 989
0f85feae 990 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 991 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
9a568de4 992 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
9c76a114 993 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
21858cd0 994 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
1da177e4 995
8feaf0c0 996 inet_twsk_put(tw);
1da177e4
LT
997}
998
a00e7444 999static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 1000 struct request_sock *req)
1da177e4 1001{
3a19ce0e
DL
1002 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1003 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1004 */
20a2b49f
ED
1005 /* RFC 7323 2.3
1006 * The window field (SEG.WND) of every outgoing segment, with the
1007 * exception of <SYN> segments, MUST be right-shifted by
1008 * Rcv.Wind.Shift bits:
1009 */
0f85feae 1010 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
3a19ce0e 1011 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
20a2b49f
ED
1012 tcp_rsk(req)->rcv_nxt,
1013 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
9a568de4 1014 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
95a22cae 1015 req->ts_recent, sk->sk_bound_dev_if,
30791ac4 1016 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
1d13a96c 1017 0, 0);
1da177e4
LT
1018}
1019
1020
079096f1 1021static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1da177e4 1022{
079096f1 1023#ifdef CONFIG_SYN_COOKIES
aa8223c7 1024 const struct tcphdr *th = tcp_hdr(skb);
1da177e4 1025
af9b4738 1026 if (!th->syn)
c6aefafb 1027 sk = cookie_v6_check(sk, skb);
1da177e4
LT
1028#endif
1029 return sk;
1030}
1031
1da177e4
LT
1032static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1033{
1da177e4
LT
1034 if (skb->protocol == htons(ETH_P_IP))
1035 return tcp_v4_conn_request(sk, skb);
1036
1037 if (!ipv6_unicast_destination(skb))
1ab1457c 1038 goto drop;
1da177e4 1039
1fb6f159
OP
1040 return tcp_conn_request(&tcp6_request_sock_ops,
1041 &tcp_request_sock_ipv6_ops, sk, skb);
1da177e4
LT
1042
1043drop:
9caad864 1044 tcp_listendrop(sk);
1da177e4
LT
1045 return 0; /* don't send reset */
1046}
1047
ebf6c9cb
ED
1048static void tcp_v6_restore_cb(struct sk_buff *skb)
1049{
1050 /* We need to move header back to the beginning if xfrm6_policy_check()
1051 * and tcp_v6_fill_cb() are going to be called again.
1052 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1053 */
1054 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1055 sizeof(struct inet6_skb_parm));
1056}
1057
0c27171e 1058static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
4c99aa40 1059 struct request_sock *req,
5e0724d0
ED
1060 struct dst_entry *dst,
1061 struct request_sock *req_unhash,
1062 bool *own_req)
1da177e4 1063{
634fb979 1064 struct inet_request_sock *ireq;
0c27171e
ED
1065 struct ipv6_pinfo *newnp;
1066 const struct ipv6_pinfo *np = inet6_sk(sk);
45f6fad8 1067 struct ipv6_txoptions *opt;
1da177e4
LT
1068 struct tcp6_sock *newtcp6sk;
1069 struct inet_sock *newinet;
1070 struct tcp_sock *newtp;
1071 struct sock *newsk;
cfb6eeb4
YH
1072#ifdef CONFIG_TCP_MD5SIG
1073 struct tcp_md5sig_key *key;
1074#endif
3840a06e 1075 struct flowi6 fl6;
1da177e4
LT
1076
1077 if (skb->protocol == htons(ETH_P_IP)) {
1078 /*
1079 * v6 mapped
1080 */
1081
5e0724d0
ED
1082 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1083 req_unhash, own_req);
1da177e4 1084
63159f29 1085 if (!newsk)
1da177e4
LT
1086 return NULL;
1087
1088 newtcp6sk = (struct tcp6_sock *)newsk;
1089 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1090
1091 newinet = inet_sk(newsk);
1092 newnp = inet6_sk(newsk);
1093 newtp = tcp_sk(newsk);
1094
1095 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1096
d1e559d0 1097 newnp->saddr = newsk->sk_v6_rcv_saddr;
1da177e4 1098
8292a17a 1099 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1100 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1101#ifdef CONFIG_TCP_MD5SIG
1102 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1103#endif
1104
83eaddab 1105 newnp->ipv6_mc_list = NULL;
676a1184
YZ
1106 newnp->ipv6_ac_list = NULL;
1107 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1108 newnp->pktoptions = NULL;
1109 newnp->opt = NULL;
870c3151 1110 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1111 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1112 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1113 if (np->repflow)
1114 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4 1115
e6848976
ACM
1116 /*
1117 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1118 * here, tcp_create_openreq_child now does this for us, see the comment in
1119 * that function for the gory details. -acme
1da177e4 1120 */
1da177e4
LT
1121
 1122 /* This is a tricky place. Until this moment IPv4 tcp
8292a17a 1123 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1124 Sync it now.
1125 */
d83d8461 1126 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1127
1128 return newsk;
1129 }
1130
634fb979 1131 ireq = inet_rsk(req);
1da177e4
LT
1132
1133 if (sk_acceptq_is_full(sk))
1134 goto out_overflow;
1135
493f377d 1136 if (!dst) {
f76b33c3 1137 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
493f377d 1138 if (!dst)
1da177e4 1139 goto out;
1ab1457c 1140 }
1da177e4
LT
1141
1142 newsk = tcp_create_openreq_child(sk, req, skb);
63159f29 1143 if (!newsk)
093d2823 1144 goto out_nonewsk;
1da177e4 1145
e6848976
ACM
1146 /*
1147 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1148 * count here, tcp_create_openreq_child now does this for us, see the
1149 * comment in that function for the gory details. -acme
1150 */
1da177e4 1151
59eed279 1152 newsk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 1153 ip6_dst_store(newsk, dst, NULL, NULL);
fae6ef87 1154 inet6_sk_rx_dst_set(newsk, skb);
1da177e4
LT
1155
1156 newtcp6sk = (struct tcp6_sock *)newsk;
1157 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1158
1159 newtp = tcp_sk(newsk);
1160 newinet = inet_sk(newsk);
1161 newnp = inet6_sk(newsk);
1162
1163 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1164
634fb979
ED
1165 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1166 newnp->saddr = ireq->ir_v6_loc_addr;
1167 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1168 newsk->sk_bound_dev_if = ireq->ir_iif;
1da177e4 1169
1ab1457c 1170 /* Now IPv6 options...
1da177e4
LT
1171
1172 First: no IPv4 options.
1173 */
f6d8bd05 1174 newinet->inet_opt = NULL;
83eaddab 1175 newnp->ipv6_mc_list = NULL;
676a1184 1176 newnp->ipv6_ac_list = NULL;
d35690be 1177 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1178
1179 /* Clone RX bits */
1180 newnp->rxopt.all = np->rxopt.all;
1181
1da177e4 1182 newnp->pktoptions = NULL;
1da177e4 1183 newnp->opt = NULL;
870c3151 1184 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1185 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1186 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1187 if (np->repflow)
1188 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4
LT
1189
1190 /* Clone native IPv6 options from listening socket (if any)
1191
1192 Yes, keeping reference count would be much more clever,
 1193 but we do one more thing here: reattach optmem
1194 to newsk.
1195 */
56ac42bc
HD
1196 opt = ireq->ipv6_opt;
1197 if (!opt)
1198 opt = rcu_dereference(np->opt);
45f6fad8
ED
1199 if (opt) {
1200 opt = ipv6_dup_options(newsk, opt);
1201 RCU_INIT_POINTER(newnp->opt, opt);
1202 }
d83d8461 1203 inet_csk(newsk)->icsk_ext_hdr_len = 0;
45f6fad8
ED
1204 if (opt)
1205 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1206 opt->opt_flen;
1da177e4 1207
81164413
DB
1208 tcp_ca_openreq_child(newsk, dst);
1209
1da177e4 1210 tcp_sync_mss(newsk, dst_mtu(dst));
3541f9e8 1211 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
d135c522 1212
1da177e4
LT
1213 tcp_initialize_rcv_mss(newsk);
1214
c720c7e8
ED
1215 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1216 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 1217
cfb6eeb4
YH
1218#ifdef CONFIG_TCP_MD5SIG
1219 /* Copy over the MD5 key from the original socket */
4aa956d8 1220 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
53b24b8f 1221 if (key) {
cfb6eeb4
YH
1222 /* We're using one, so create a matching key
1223 * on the newsk structure. If we fail to get
1224 * memory, then we end up not copying the key
1225 * across. Shucks.
1226 */
efe4208f 1227 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
6797318e 1228 AF_INET6, 128, key->key, key->keylen,
7450aaf6 1229 sk_gfp_mask(sk, GFP_ATOMIC));
cfb6eeb4
YH
1230 }
1231#endif
1232
093d2823 1233 if (__inet_inherit_port(sk, newsk) < 0) {
e337e24d
CP
1234 inet_csk_prepare_forced_close(newsk);
1235 tcp_done(newsk);
093d2823
BS
1236 goto out;
1237 }
5e0724d0 1238 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
805c4bc0 1239 if (*own_req) {
49a496c9 1240 tcp_move_syn(newtp, req);
805c4bc0
ED
1241
1242 /* Clone pktoptions received with SYN, if we own the req */
1243 if (ireq->pktopts) {
1244 newnp->pktoptions = skb_clone(ireq->pktopts,
7450aaf6 1245 sk_gfp_mask(sk, GFP_ATOMIC));
805c4bc0
ED
1246 consume_skb(ireq->pktopts);
1247 ireq->pktopts = NULL;
ebf6c9cb
ED
1248 if (newnp->pktoptions) {
1249 tcp_v6_restore_cb(newnp->pktoptions);
805c4bc0 1250 skb_set_owner_r(newnp->pktoptions, newsk);
ebf6c9cb 1251 }
805c4bc0 1252 }
ce105008 1253 }
1da177e4
LT
1254
1255 return newsk;
1256
1257out_overflow:
02a1d6e7 1258 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823 1259out_nonewsk:
1da177e4 1260 dst_release(dst);
093d2823 1261out:
9caad864 1262 tcp_listendrop(sk);
1da177e4
LT
1263 return NULL;
1264}
1265
1da177e4 1266/* The socket must have its spinlock held when we get
e994b2f0 1267 * here, unless it is a TCP_LISTEN socket.
1da177e4
LT
1268 *
1269 * We have a potential double-lock case here, so even when
1270 * doing backlog processing we use the BH locking scheme.
1271 * This is because we cannot sleep with the original spinlock
1272 * held.
1273 */
1274static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1275{
1276 struct ipv6_pinfo *np = inet6_sk(sk);
1277 struct tcp_sock *tp;
1278 struct sk_buff *opt_skb = NULL;
1279
1280 /* Imagine: socket is IPv6. IPv4 packet arrives,
 1281 goes to the IPv4 receive handler and is backlogged.
1282 From backlog it always goes here. Kerboom...
1283 Fortunately, tcp_rcv_established and rcv_established
 1284 handle them correctly, but it is not the case with
1285 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1286 */
1287
1288 if (skb->protocol == htons(ETH_P_IP))
1289 return tcp_v4_do_rcv(sk, skb);
1290
1da177e4
LT
1291 /*
1292 * socket locking is here for SMP purposes as backlog rcv
1293 * is currently called with bh processing disabled.
1294 */
1295
1296 /* Do Stevens' IPV6_PKTOPTIONS.
1297
 1298 Yes, this is the only place in our code where we
 1299 can make it not affect IPv4.
 1300 The rest of the code is protocol independent,
 1301 and I do not like the idea of uglifying IPv4.
 1302
 1303 Actually, the whole idea behind IPV6_PKTOPTIONS
 1304 does not look very well thought out. For now we latch the
 1305 options received in the last packet enqueued
 1306 by tcp. Feel free to propose a better solution.
1ab1457c 1307 --ANK (980728)
1da177e4
LT
1308 */
1309 if (np->rxopt.all)
7450aaf6 1310 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1da177e4
LT
1311
1312 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
5d299f3d
ED
1313 struct dst_entry *dst = sk->sk_rx_dst;
1314
bdeab991 1315 sock_rps_save_rxhash(sk, skb);
3d97379a 1316 sk_mark_napi_id(sk, skb);
5d299f3d
ED
1317 if (dst) {
1318 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1319 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1320 dst_release(dst);
1321 sk->sk_rx_dst = NULL;
1322 }
1323 }
1324
3d97d88e 1325 tcp_rcv_established(sk, skb);
1da177e4
LT
1326 if (opt_skb)
1327 goto ipv6_pktoptions;
1328 return 0;
1329 }
1330
12e25e10 1331 if (tcp_checksum_complete(skb))
1da177e4
LT
1332 goto csum_err;
1333
1ab1457c 1334 if (sk->sk_state == TCP_LISTEN) {
079096f1
ED
1335 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1336
1da177e4
LT
1337 if (!nsk)
1338 goto discard;
1339
4c99aa40 1340 if (nsk != sk) {
1da177e4
LT
1341 if (tcp_child_process(sk, nsk, skb))
1342 goto reset;
1343 if (opt_skb)
1344 __kfree_skb(opt_skb);
1345 return 0;
1346 }
47482f13 1347 } else
bdeab991 1348 sock_rps_save_rxhash(sk, skb);
1da177e4 1349
72ab4a86 1350 if (tcp_rcv_state_process(sk, skb))
1da177e4 1351 goto reset;
1da177e4
LT
1352 if (opt_skb)
1353 goto ipv6_pktoptions;
1354 return 0;
1355
1356reset:
cfb6eeb4 1357 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1358discard:
1359 if (opt_skb)
1360 __kfree_skb(opt_skb);
1361 kfree_skb(skb);
1362 return 0;
1363csum_err:
c10d9310
ED
1364 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1365 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1366 goto discard;
1367
1368
1369ipv6_pktoptions:
 1370 /* What is this about? At this point:
 1371
 1372 1. skb was enqueued by tcp.
 1373 2. skb was added to the tail of the read queue, rather than out of order.
 1374 3. the socket is not in a passive state.
 1375 4. Finally, it really contains options which the user wants to receive.
1376 */
1377 tp = tcp_sk(sk);
1378 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1379 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1380 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
870c3151 1381 np->mcast_oif = tcp_v6_iif(opt_skb);
333fad53 1382 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1383 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
82e9f105 1384 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1397ed35 1385 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
df3687ff
FF
1386 if (np->repflow)
1387 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
a224772d 1388 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1da177e4 1389 skb_set_owner_r(opt_skb, sk);
8ce48623 1390 tcp_v6_restore_cb(opt_skb);
1da177e4
LT
1391 opt_skb = xchg(&np->pktoptions, opt_skb);
1392 } else {
1393 __kfree_skb(opt_skb);
1394 opt_skb = xchg(&np->pktoptions, NULL);
1395 }
1396 }
1397
800d55f1 1398 kfree_skb(opt_skb);
1da177e4
LT
1399 return 0;
1400}
1401
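/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the rxopt bits consulted in the ipv6_pktoptions path above are enabled
 * from userspace with the RFC 3542 socket options.  As the comment in
 * tcp_v6_do_rcv() notes, for TCP the kernel latches the options of the
 * last enqueued packet rather than delivering them per segment.
 */
#if 0	/* example only */
#include <netinet/in.h>
#include <sys/socket.h>

static void request_rx_options(int fd)
{
	int on = 1;

	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));	/* rxopt.bits.rxinfo */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));	/* rxopt.bits.rxhlim */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVTCLASS, &on, sizeof(on));		/* rxopt.bits.rxtclass */
}
#endif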
2dc49d16
ND
1402static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1403 const struct tcphdr *th)
1404{
 1405 /* This is tricky: we move IP6CB to its correct location inside
1406 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1407 * _decode_session6() uses IP6CB().
1408 * barrier() makes sure compiler won't play aliasing games.
1409 */
1410 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1411 sizeof(struct inet6_skb_parm));
1412 barrier();
1413
1414 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1415 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1416 skb->len - th->doff*4);
1417 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1418 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1419 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1420 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1421 TCP_SKB_CB(skb)->sacked = 0;
98aaa913
MM
1422 TCP_SKB_CB(skb)->has_rxtstamp =
1423 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
2dc49d16
ND
1424}
1425
e5bbef20 1426static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1427{
4297a0ef 1428 int sdif = inet6_sdif(skb);
cf533ea5 1429 const struct tcphdr *th;
b71d1d42 1430 const struct ipv6hdr *hdr;
3b24d854 1431 bool refcounted;
1da177e4
LT
1432 struct sock *sk;
1433 int ret;
a86b1e30 1434 struct net *net = dev_net(skb->dev);
1da177e4
LT
1435
1436 if (skb->pkt_type != PACKET_HOST)
1437 goto discard_it;
1438
1439 /*
1440 * Count it even if it's bad.
1441 */
90bbcc60 1442 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1443
1444 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1445 goto discard_it;
1446
ea1627c2 1447 th = (const struct tcphdr *)skb->data;
1da177e4 1448
ea1627c2 1449 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1da177e4
LT
1450 goto bad_packet;
1451 if (!pskb_may_pull(skb, th->doff*4))
1452 goto discard_it;
1453
e4f45b7f 1454 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
6a5dc9e5 1455 goto csum_error;
1da177e4 1456
ea1627c2 1457 th = (const struct tcphdr *)skb->data;
e802af9c 1458 hdr = ipv6_hdr(skb);
1da177e4 1459
4bdc3d66 1460lookup:
a583636a 1461 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
4297a0ef 1462 th->source, th->dest, inet6_iif(skb), sdif,
3b24d854 1463 &refcounted);
1da177e4
LT
1464 if (!sk)
1465 goto no_tcp_socket;
1466
1467process:
1468 if (sk->sk_state == TCP_TIME_WAIT)
1469 goto do_time_wait;
1470
079096f1
ED
1471 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1472 struct request_sock *req = inet_reqsk(sk);
e0f9759f 1473 bool req_stolen = false;
7716682c 1474 struct sock *nsk;
079096f1
ED
1475
1476 sk = req->rsk_listener;
079096f1 1477 if (tcp_v6_inbound_md5_hash(sk, skb)) {
e65c332d 1478 sk_drops_add(sk, skb);
079096f1
ED
1479 reqsk_put(req);
1480 goto discard_it;
1481 }
7716682c 1482 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1483 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1484 goto lookup;
1485 }
7716682c 1486 sock_hold(sk);
3b24d854 1487 refcounted = true;
1f3b359f 1488 nsk = NULL;
eeea10b8
ED
1489 if (!tcp_filter(sk, skb)) {
1490 th = (const struct tcphdr *)skb->data;
1491 hdr = ipv6_hdr(skb);
1492 tcp_v6_fill_cb(skb, hdr, th);
e0f9759f 1493 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
eeea10b8 1494 }
079096f1
ED
1495 if (!nsk) {
1496 reqsk_put(req);
e0f9759f
ED
1497 if (req_stolen) {
1498 /* Another cpu got exclusive access to req
1499 * and created a full blown socket.
1500 * Try to feed this packet to this socket
1501 * instead of discarding it.
1502 */
1503 tcp_v6_restore_cb(skb);
1504 sock_put(sk);
1505 goto lookup;
1506 }
7716682c 1507 goto discard_and_relse;
079096f1
ED
1508 }
1509 if (nsk == sk) {
079096f1
ED
1510 reqsk_put(req);
1511 tcp_v6_restore_cb(skb);
1512 } else if (tcp_child_process(sk, nsk, skb)) {
1513 tcp_v6_send_reset(nsk, skb);
7716682c 1514 goto discard_and_relse;
079096f1 1515 } else {
7716682c 1516 sock_put(sk);
079096f1
ED
1517 return 0;
1518 }
1519 }
e802af9c 1520 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 1521 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
1522 goto discard_and_relse;
1523 }
1524
1da177e4
LT
1525 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1526 goto discard_and_relse;
1527
9ea88a15
DP
1528 if (tcp_v6_inbound_md5_hash(sk, skb))
1529 goto discard_and_relse;
9ea88a15 1530
ac6e7800 1531 if (tcp_filter(sk, skb))
1da177e4 1532 goto discard_and_relse;
ac6e7800
ED
1533 th = (const struct tcphdr *)skb->data;
1534 hdr = ipv6_hdr(skb);
eeea10b8 1535 tcp_v6_fill_cb(skb, hdr, th);
1da177e4
LT
1536
1537 skb->dev = NULL;
1538
e994b2f0
ED
1539 if (sk->sk_state == TCP_LISTEN) {
1540 ret = tcp_v6_do_rcv(sk, skb);
1541 goto put_and_return;
1542 }
1543
1544 sk_incoming_cpu_update(sk);
1545
293b9c42 1546 bh_lock_sock_nested(sk);
a44d6eac 1547 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1548 ret = 0;
1549 if (!sock_owned_by_user(sk)) {
e7942d06 1550 ret = tcp_v6_do_rcv(sk, skb);
c9c33212 1551 } else if (tcp_add_backlog(sk, skb)) {
6b03a53a
ZY
1552 goto discard_and_relse;
1553 }
1da177e4
LT
1554 bh_unlock_sock(sk);
1555
e994b2f0 1556put_and_return:
3b24d854
ED
1557 if (refcounted)
1558 sock_put(sk);
1da177e4
LT
1559 return ret ? -1 : 0;
1560
1561no_tcp_socket:
1562 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1563 goto discard_it;
1564
2dc49d16
ND
1565 tcp_v6_fill_cb(skb, hdr, th);
1566
12e25e10 1567 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1568csum_error:
90bbcc60 1569 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1570bad_packet:
90bbcc60 1571 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1572 } else {
cfb6eeb4 1573 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1574 }
1575
1576discard_it:
1da177e4
LT
1577 kfree_skb(skb);
1578 return 0;
1579
1580discard_and_relse:
532182cd 1581 sk_drops_add(sk, skb);
3b24d854
ED
1582 if (refcounted)
1583 sock_put(sk);
1da177e4
LT
1584 goto discard_it;
1585
1586do_time_wait:
1587 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1588 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1589 goto discard_it;
1590 }
1591
2dc49d16
ND
1592 tcp_v6_fill_cb(skb, hdr, th);
1593
6a5dc9e5
ED
1594 if (tcp_checksum_complete(skb)) {
1595 inet_twsk_put(inet_twsk(sk));
1596 goto csum_error;
1da177e4
LT
1597 }
1598
9469c7b4 1599 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1600 case TCP_TW_SYN:
1601 {
1602 struct sock *sk2;
1603
c346dca1 1604 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
a583636a 1605 skb, __tcp_hdrlen(th),
5ba24953 1606 &ipv6_hdr(skb)->saddr, th->source,
0660e03f 1607 &ipv6_hdr(skb)->daddr,
4297a0ef
DA
1608 ntohs(th->dest), tcp_v6_iif(skb),
1609 sdif);
53b24b8f 1610 if (sk2) {
295ff7ed 1611 struct inet_timewait_sock *tw = inet_twsk(sk);
dbe7faa4 1612 inet_twsk_deschedule_put(tw);
1da177e4 1613 sk = sk2;
4ad19de8 1614 tcp_v6_restore_cb(skb);
3b24d854 1615 refcounted = false;
1da177e4
LT
1616 goto process;
1617 }
1da177e4 1618 }
275757e6
GS
1619 /* to ACK */
1620 /* fall through */
1da177e4
LT
1621 case TCP_TW_ACK:
1622 tcp_v6_timewait_ack(sk, skb);
1623 break;
1624 case TCP_TW_RST:
271c3b9b
FW
1625 tcp_v6_send_reset(sk, skb);
1626 inet_twsk_deschedule_put(inet_twsk(sk));
1627 goto discard_it;
4aa956d8
WY
1628 case TCP_TW_SUCCESS:
1629 ;
1da177e4
LT
1630 }
1631 goto discard_it;
1632}
1633
c7109986
ED
1634static void tcp_v6_early_demux(struct sk_buff *skb)
1635{
1636 const struct ipv6hdr *hdr;
1637 const struct tcphdr *th;
1638 struct sock *sk;
1639
1640 if (skb->pkt_type != PACKET_HOST)
1641 return;
1642
1643 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1644 return;
1645
1646 hdr = ipv6_hdr(skb);
1647 th = tcp_hdr(skb);
1648
1649 if (th->doff < sizeof(struct tcphdr) / 4)
1650 return;
1651
870c3151 1652 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
c7109986
ED
1653 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1654 &hdr->saddr, th->source,
1655 &hdr->daddr, ntohs(th->dest),
4297a0ef 1656 inet6_iif(skb), inet6_sdif(skb));
c7109986
ED
1657 if (sk) {
1658 skb->sk = sk;
1659 skb->destructor = sock_edemux;
f7e4eb03 1660 if (sk_fullsock(sk)) {
d0c294c5 1661 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
f3f12135 1662
c7109986 1663 if (dst)
5d299f3d 1664 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
c7109986 1665 if (dst &&
f3f12135 1666 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1667 skb_dst_set_noref(skb, dst);
1668 }
1669 }
1670}
1671
ccb7c410
DM
1672static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1673 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1674 .twsk_unique = tcp_twsk_unique,
4aa956d8 1675 .twsk_destructor = tcp_twsk_destructor,
ccb7c410
DM
1676};
1677
3b401a81 1678static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1679 .queue_xmit = inet6_csk_xmit,
1680 .send_check = tcp_v6_send_check,
1681 .rebuild_header = inet6_sk_rebuild_header,
5d299f3d 1682 .sk_rx_dst_set = inet6_sk_rx_dst_set,
543d9cfe
ACM
1683 .conn_request = tcp_v6_conn_request,
1684 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe 1685 .net_header_len = sizeof(struct ipv6hdr),
67469601 1686 .net_frag_header_len = sizeof(struct frag_hdr),
543d9cfe
ACM
1687 .setsockopt = ipv6_setsockopt,
1688 .getsockopt = ipv6_getsockopt,
1689 .addr2sockaddr = inet6_csk_addr2sockaddr,
1690 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1691#ifdef CONFIG_COMPAT
543d9cfe
ACM
1692 .compat_setsockopt = compat_ipv6_setsockopt,
1693 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1694#endif
4fab9071 1695 .mtu_reduced = tcp_v6_mtu_reduced,
1da177e4
LT
1696};
1697
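/*
 * TCP-MD5 (RFC 2385) hooks for native IPv6 sockets: signatures are
 * computed over the IPv6 pseudo-header.
 */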
cfb6eeb4 1698#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1699static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1700 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1701 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 1702 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1703};
a928630a 1704#endif
cfb6eeb4 1705
1da177e4
LT
1706/*
1707 * TCP over IPv4 via INET6 API
1708 */
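/*
 * These ops are installed on an AF_INET6 socket once it is connected to an
 * IPv4-mapped address, so packets are built and checksummed as IPv4 while
 * the socket keeps its IPv6 API.
 */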
3b401a81 1709static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1710 .queue_xmit = ip_queue_xmit,
1711 .send_check = tcp_v4_send_check,
1712 .rebuild_header = inet_sk_rebuild_header,
63d02d15 1713 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1714 .conn_request = tcp_v6_conn_request,
1715 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe
ACM
1716 .net_header_len = sizeof(struct iphdr),
1717 .setsockopt = ipv6_setsockopt,
1718 .getsockopt = ipv6_getsockopt,
1719 .addr2sockaddr = inet6_csk_addr2sockaddr,
1720 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1721#ifdef CONFIG_COMPAT
543d9cfe
ACM
1722 .compat_setsockopt = compat_ipv6_setsockopt,
1723 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1724#endif
4fab9071 1725 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4
LT
1726};
1727
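/*
 * MD5 hooks for v4-mapped sockets: hashes are computed over the IPv4
 * pseudo-header, but keys are still parsed via the IPv6 setsockopt path.
 */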
cfb6eeb4 1728#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1729static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1730 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1731 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1732 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1733};
a928630a 1734#endif
cfb6eeb4 1735
1da177e4
LT
1736/* NOTE: A lot of things are set to zero explicitly by the call to
 1737 * sk_alloc(), so they need not be done here.
1738 */
1739static int tcp_v6_init_sock(struct sock *sk)
1740{
6687e988 1741 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1742
900f65d3 1743 tcp_init_sock(sk);
1da177e4 1744
8292a17a 1745 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 1746
cfb6eeb4 1747#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1748 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
cfb6eeb4
YH
1749#endif
1750
1da177e4
LT
1751 return 0;
1752}
1753
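/*
 * Teardown reuses the IPv4 path and then frees IPv6-specific socket state
 * (queued packet options, flow labels) via inet6_destroy_sock().
 */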
7d06b2e0 1754static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1755{
1da177e4 1756 tcp_v4_destroy_sock(sk);
7d06b2e0 1757 inet6_destroy_sock(sk);
1da177e4
LT
1758}
1759
952a10be 1760#ifdef CONFIG_PROC_FS
1da177e4 1761/* Proc filesystem TCPv6 sock list dumping. */
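/* One /proc/net/tcp6 line for a request sock still in SYN_RECV: addresses,
 * the remaining SYN-ACK timer and the retransmission count.
 */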
1ab1457c 1762static void get_openreq6(struct seq_file *seq,
aa3a0c8c 1763 const struct request_sock *req, int i)
1da177e4 1764{
fa76ce73 1765 long ttd = req->rsk_timer.expires - jiffies;
634fb979
ED
1766 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1767 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1da177e4
LT
1768
1769 if (ttd < 0)
1770 ttd = 0;
1771
1da177e4
LT
1772 seq_printf(seq,
1773 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1774 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1da177e4
LT
1775 i,
1776 src->s6_addr32[0], src->s6_addr32[1],
1777 src->s6_addr32[2], src->s6_addr32[3],
b44084c2 1778 inet_rsk(req)->ir_num,
1da177e4
LT
1779 dest->s6_addr32[0], dest->s6_addr32[1],
1780 dest->s6_addr32[2], dest->s6_addr32[3],
634fb979 1781 ntohs(inet_rsk(req)->ir_rmt_port),
1da177e4 1782 TCP_SYN_RECV,
4c99aa40 1783 0, 0, /* could print option size, but that is af dependent. */
1ab1457c
YH
1784 1, /* timers active (only the expire timer) */
1785 jiffies_to_clock_t(ttd),
e6c022a4 1786 req->num_timeout,
aa3a0c8c
ED
1787 from_kuid_munged(seq_user_ns(seq),
1788 sock_i_uid(req->rsk_listener)),
1ab1457c 1789 0, /* non standard timer */
1da177e4
LT
1790 0, /* open_requests have no inode */
1791 0, req);
1792}
1793
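/* One /proc/net/tcp6 line for a full socket: state, queue sizes, the
 * pending timer and its expiry, retransmit/probe counters, owner, inode,
 * RTO/ATO, cwnd and ssthresh (or the fastopen queue limit for listeners).
 */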
1794static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1795{
b71d1d42 1796 const struct in6_addr *dest, *src;
1da177e4
LT
1797 __u16 destp, srcp;
1798 int timer_active;
1799 unsigned long timer_expires;
cf533ea5
ED
1800 const struct inet_sock *inet = inet_sk(sp);
1801 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1802 const struct inet_connection_sock *icsk = inet_csk(sp);
0536fcc0 1803 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
00fd38d9
ED
1804 int rx_queue;
1805 int state;
1da177e4 1806
efe4208f
ED
1807 dest = &sp->sk_v6_daddr;
1808 src = &sp->sk_v6_rcv_saddr;
c720c7e8
ED
1809 destp = ntohs(inet->inet_dport);
1810 srcp = ntohs(inet->inet_sport);
463c84b9 1811
ce3cf4ec 1812 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
57dde7f7 1813 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
ce3cf4ec 1814 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 1815 timer_active = 1;
463c84b9
ACM
1816 timer_expires = icsk->icsk_timeout;
1817 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1818 timer_active = 4;
463c84b9 1819 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1820 } else if (timer_pending(&sp->sk_timer)) {
1821 timer_active = 2;
1822 timer_expires = sp->sk_timer.expires;
1823 } else {
1824 timer_active = 0;
1825 timer_expires = jiffies;
1826 }
1827
986ffdfd 1828 state = inet_sk_state_load(sp);
00fd38d9
ED
1829 if (state == TCP_LISTEN)
1830 rx_queue = sp->sk_ack_backlog;
1831 else
1832 /* Because we don't lock the socket,
1833 * we might find a transient negative value.
1834 */
1835 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1836
1da177e4
LT
1837 seq_printf(seq,
1838 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1839 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
1840 i,
1841 src->s6_addr32[0], src->s6_addr32[1],
1842 src->s6_addr32[2], src->s6_addr32[3], srcp,
1843 dest->s6_addr32[0], dest->s6_addr32[1],
1844 dest->s6_addr32[2], dest->s6_addr32[3], destp,
00fd38d9
ED
1845 state,
1846 tp->write_seq - tp->snd_una,
1847 rx_queue,
1da177e4 1848 timer_active,
a399a805 1849 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 1850 icsk->icsk_retransmits,
a7cb5a49 1851 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
6687e988 1852 icsk->icsk_probes_out,
1da177e4 1853 sock_i_ino(sp),
41c6d650 1854 refcount_read(&sp->sk_refcnt), sp,
7be87351
SH
1855 jiffies_to_clock_t(icsk->icsk_rto),
1856 jiffies_to_clock_t(icsk->icsk_ack.ato),
4c99aa40 1857 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
0b6a05c1 1858 tp->snd_cwnd,
00fd38d9 1859 state == TCP_LISTEN ?
0536fcc0 1860 fastopenq->max_qlen :
0a672f74 1861 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1da177e4
LT
1862 );
1863}
1864
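/* One /proc/net/tcp6 line for a TIME-WAIT socket; most counters are
 * reported as zero and the timer column carries the remaining TIME-WAIT
 * period.
 */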
1ab1457c 1865static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1866 struct inet_timewait_sock *tw, int i)
1da177e4 1867{
789f558c 1868 long delta = tw->tw_timer.expires - jiffies;
b71d1d42 1869 const struct in6_addr *dest, *src;
1da177e4 1870 __u16 destp, srcp;
1da177e4 1871
efe4208f
ED
1872 dest = &tw->tw_v6_daddr;
1873 src = &tw->tw_v6_rcv_saddr;
1da177e4
LT
1874 destp = ntohs(tw->tw_dport);
1875 srcp = ntohs(tw->tw_sport);
1876
1877 seq_printf(seq,
1878 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1879 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1880 i,
1881 src->s6_addr32[0], src->s6_addr32[1],
1882 src->s6_addr32[2], src->s6_addr32[3], srcp,
1883 dest->s6_addr32[0], dest->s6_addr32[1],
1884 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1885 tw->tw_substate, 0, 0,
a399a805 1886 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
41c6d650 1887 refcount_read(&tw->tw_refcnt), tw);
1da177e4
LT
1888}
1889
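/*
 * seq_file show callback for /proc/net/tcp6: print the header for the
 * start token, then dispatch on socket state, since the hash tables walked
 * by the iterator also contain TIME-WAIT and request (NEW_SYN_RECV) socks.
 */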
1da177e4
LT
1890static int tcp6_seq_show(struct seq_file *seq, void *v)
1891{
1892 struct tcp_iter_state *st;
05dbc7b5 1893 struct sock *sk = v;
1da177e4
LT
1894
1895 if (v == SEQ_START_TOKEN) {
1896 seq_puts(seq,
1897 " sl "
1898 "local_address "
1899 "remote_address "
1900 "st tx_queue rx_queue tr tm->when retrnsmt"
1901 " uid timeout inode\n");
1902 goto out;
1903 }
1904 st = seq->private;
1905
079096f1
ED
1906 if (sk->sk_state == TCP_TIME_WAIT)
1907 get_timewait6_sock(seq, v, st->num);
1908 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 1909 get_openreq6(seq, v, st->num);
079096f1
ED
1910 else
1911 get_tcp6_sock(seq, v, st->num);
1da177e4
LT
1912out:
1913 return 0;
1914}
1915
37d849bb
CH
1916static const struct seq_operations tcp6_seq_ops = {
1917 .show = tcp6_seq_show,
1918 .start = tcp_seq_start,
1919 .next = tcp_seq_next,
1920 .stop = tcp_seq_stop,
1921};
1922
1da177e4 1923static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4 1924 .family = AF_INET6,
1da177e4
LT
1925};
1926
2c8c1e72 1927int __net_init tcp6_proc_init(struct net *net)
1da177e4 1928{
c3506372
CH
1929 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
1930 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
37d849bb
CH
1931 return -ENOMEM;
1932 return 0;
1da177e4
LT
1933}
1934
6f8b13bc 1935void tcp6_proc_exit(struct net *net)
1da177e4 1936{
37d849bb 1937 remove_proc_entry("tcp6", net->proc_net);
1da177e4
LT
1938}
1939#endif
1940
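/*
 * Protocol descriptor for IPPROTO_TCP on AF_INET6.  Almost every operation
 * is the address-family independent tcp_*() helper; only connect, socket
 * init/destroy, backlog processing and hashing are IPv6-aware.
 */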
1941struct proto tcpv6_prot = {
1942 .name = "TCPv6",
1943 .owner = THIS_MODULE,
1944 .close = tcp_close,
d74bad4e 1945 .pre_connect = tcp_v6_pre_connect,
1da177e4
LT
1946 .connect = tcp_v6_connect,
1947 .disconnect = tcp_disconnect,
463c84b9 1948 .accept = inet_csk_accept,
1da177e4
LT
1949 .ioctl = tcp_ioctl,
1950 .init = tcp_v6_init_sock,
1951 .destroy = tcp_v6_destroy_sock,
1952 .shutdown = tcp_shutdown,
1953 .setsockopt = tcp_setsockopt,
1954 .getsockopt = tcp_getsockopt,
4b9d07a4 1955 .keepalive = tcp_set_keepalive,
1da177e4 1956 .recvmsg = tcp_recvmsg,
7ba42910
CG
1957 .sendmsg = tcp_sendmsg,
1958 .sendpage = tcp_sendpage,
1da177e4 1959 .backlog_rcv = tcp_v6_do_rcv,
46d3ceab 1960 .release_cb = tcp_release_cb,
496611d7 1961 .hash = inet6_hash,
ab1e0a13
ACM
1962 .unhash = inet_unhash,
1963 .get_port = inet_csk_get_port,
1da177e4 1964 .enter_memory_pressure = tcp_enter_memory_pressure,
06044751 1965 .leave_memory_pressure = tcp_leave_memory_pressure,
c9bee3b7 1966 .stream_memory_free = tcp_stream_memory_free,
1da177e4
LT
1967 .sockets_allocated = &tcp_sockets_allocated,
1968 .memory_allocated = &tcp_memory_allocated,
1969 .memory_pressure = &tcp_memory_pressure,
0a5578cf 1970 .orphan_count = &tcp_orphan_count,
a4fe34bf 1971 .sysctl_mem = sysctl_tcp_mem,
356d1833
ED
1972 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1973 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
1da177e4
LT
1974 .max_header = MAX_TCP_HEADER,
1975 .obj_size = sizeof(struct tcp6_sock),
5f0d5a3a 1976 .slab_flags = SLAB_TYPESAFE_BY_RCU,
6d6ee43e 1977 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 1978 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 1979 .h.hashinfo = &tcp_hashinfo,
7ba42910 1980 .no_autobind = true,
543d9cfe
ACM
1981#ifdef CONFIG_COMPAT
1982 .compat_setsockopt = compat_tcp_setsockopt,
1983 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 1984#endif
c1e64e29 1985 .diag_destroy = tcp_abort,
1da177e4
LT
1986};
1987
a8e3bb34
DA
1988/* thinking of making this const? Don't.
1989 * early_demux can change based on sysctl.
1990 */
39294c3d 1991static struct inet6_protocol tcpv6_protocol = {
c7109986 1992 .early_demux = tcp_v6_early_demux,
dddb64bc 1993 .early_demux_handler = tcp_v6_early_demux,
1da177e4
LT
1994 .handler = tcp_v6_rcv,
1995 .err_handler = tcp_v6_err,
1996 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1997};
1998
1da177e4
LT
1999static struct inet_protosw tcpv6_protosw = {
2000 .type = SOCK_STREAM,
2001 .protocol = IPPROTO_TCP,
2002 .prot = &tcpv6_prot,
2003 .ops = &inet6_stream_ops,
d83d8461
ACM
2004 .flags = INET_PROTOSW_PERMANENT |
2005 INET_PROTOSW_ICSK,
1da177e4
LT
2006};
2007
2c8c1e72 2008static int __net_init tcpv6_net_init(struct net *net)
93ec926b 2009{
5677242f
DL
2010 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2011 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
2012}
2013
2c8c1e72 2014static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 2015{
5677242f 2016 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
2017}
2018
2c8c1e72 2019static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26 2020{
1946e672 2021 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
93ec926b
DL
2022}
2023
2024static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
2025 .init = tcpv6_net_init,
2026 .exit = tcpv6_net_exit,
2027 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
2028};
2029
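/*
 * Module init: register the IPPROTO_TCP handler with the IPv6 stack, then
 * the SOCK_STREAM protosw, then the per-netns ops; each failure path
 * unwinds, in reverse, whatever has already been registered.
 */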
7f4e4868 2030int __init tcpv6_init(void)
1da177e4 2031{
7f4e4868
DL
2032 int ret;
2033
3336288a
VY
2034 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2035 if (ret)
c6b641a4 2036 goto out;
3336288a 2037
1da177e4 2038 /* register inet6 protocol */
7f4e4868
DL
2039 ret = inet6_register_protosw(&tcpv6_protosw);
2040 if (ret)
2041 goto out_tcpv6_protocol;
2042
93ec926b 2043 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2044 if (ret)
2045 goto out_tcpv6_protosw;
2046out:
2047 return ret;
ae0f7d5f 2048
7f4e4868
DL
2049out_tcpv6_protosw:
2050 inet6_unregister_protosw(&tcpv6_protosw);
3336288a
VY
2051out_tcpv6_protocol:
2052 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
7f4e4868
DL
2053 goto out;
2054}
2055
09f7709f 2056void tcpv6_exit(void)
7f4e4868 2057{
93ec926b 2058 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2059 inet6_unregister_protosw(&tcpv6_protosw);
2060 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2061}