/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */


#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

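/* Derive the initial sequence number for a connection from the segment's
 * 4-tuple (addresses and ports); secure_tcp_sequence_number() mixes these
 * through a secret-keyed hash plus a clock component.
 */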
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
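	/* Note: the new write_seq below starts 64K + 2 past the old
	 * tw_snd_nxt, i.e. beyond anything the previous incarnation could
	 * still have outstanding within an unscaled 64KB window, so stray
	 * segments of the old connection cannot land in the new space.
	 */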
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
		struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

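	/* Seed the IP ID counter from the initial sequence number,
	 * perturbed by jiffies.
	 */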
	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big messages
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

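/* Fill in the TCP checksum.  With CHECKSUM_PARTIAL only the pseudo-header
 * sum is stored here and the device (or the software fallback) completes
 * the rest; otherwise the full checksum is folded in software.
 */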
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

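/* ACK a segment that arrived for a connection now in TIME-WAIT, using
 * the state parked in the timewait bucket.
 */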
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

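/* ACK a segment received for an embryonic connection still sitting in
 * the listener's request queue (SYN-RECV).
 */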
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return 1 if a syncookie should be sent
 */
int tcp_syn_flood_action(struct sock *sk,
			 const struct sk_buff *skb,
			 const char *proto)
{
	const char *msg = "Dropping request";
	int want_cookie = 0;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. "
			"Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}

		md5sig = tp->md5sig_info;
		if (md5sig->entries4 == 0 &&
		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				if (md5sig->entries4 == 0)
					tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
				tcp_free_md5sig_pool();
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of key keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	int want_cookie = 0;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;
		struct flowi4 fl4;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr &&
		    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
		goto put_and_exit;

	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

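/* Work out what an incoming segment on a listening socket belongs to:
 * a pending request sock, an already-established child in the hash
 * tables, or (with syncookies) a cookie-carrying ACK.
 */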
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

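/* Verify or defer the receive checksum.  Hardware-summed packets
 * (CHECKSUM_COMPLETE) are verified against the pseudo-header right away;
 * otherwise short packets are checksummed immediately, while for longer
 * ones only the pseudo-header sum is stored in skb->csum so the full
 * check can be completed later, when the data is actually consumed.
 */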
b51655b9 1546static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1da177e4 1547{
eddc9ec5
ACM
1548 const struct iphdr *iph = ip_hdr(skb);
1549
84fa7933 1550 if (skb->ip_summed == CHECKSUM_COMPLETE) {
eddc9ec5
ACM
1551 if (!tcp_v4_check(skb->len, iph->saddr,
1552 iph->daddr, skb->csum)) {
fb286bb2 1553 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1554 return 0;
fb286bb2 1555 }
1da177e4 1556 }
fb286bb2 1557
eddc9ec5 1558 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
fb286bb2
HX
1559 skb->len, IPPROTO_TCP, 0);
1560
1da177e4 1561 if (skb->len <= 76) {
fb286bb2 1562 return __skb_checksum_complete(skb);
1da177e4
LT
1563 }
1564 return 0;
1565}
1566
1567
1568/* The socket must have it's spinlock held when we get
1569 * here.
1570 *
1571 * We have a potential double-lock case here, so even when
1572 * doing backlog processing we use the BH locking scheme.
1573 * This is because we cannot sleep with the original spinlock
1574 * held.
1575 */
1576int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1577{
cfb6eeb4
YH
1578 struct sock *rsk;
1579#ifdef CONFIG_TCP_MD5SIG
1580 /*
1581 * We really want to reject the packet as early as possible
1582 * if:
1583 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1584 * o There is an MD5 option and we're not expecting one
1585 */
7174259e 1586 if (tcp_v4_inbound_md5_hash(sk, skb))
cfb6eeb4
YH
1587 goto discard;
1588#endif
1589
1da177e4 1590 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
bdeab991 1591 sock_rps_save_rxhash(sk, skb);
aa8223c7 1592 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1593 rsk = sk;
1da177e4 1594 goto reset;
cfb6eeb4 1595 }
1da177e4
LT
1596 return 0;
1597 }
1598
ab6a5bb6 1599 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1600 goto csum_err;
1601
1602 if (sk->sk_state == TCP_LISTEN) {
1603 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1604 if (!nsk)
1605 goto discard;
1606
1607 if (nsk != sk) {
bdeab991 1608 sock_rps_save_rxhash(nsk, skb);
cfb6eeb4
YH
1609 if (tcp_child_process(sk, nsk, skb)) {
1610 rsk = nsk;
1da177e4 1611 goto reset;
cfb6eeb4 1612 }
1da177e4
LT
1613 return 0;
1614 }
ca55158c 1615 } else
bdeab991 1616 sock_rps_save_rxhash(sk, skb);
ca55158c 1617
aa8223c7 1618 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1619 rsk = sk;
1da177e4 1620 goto reset;
cfb6eeb4 1621 }
1da177e4
LT
1622 return 0;
1623
1624reset:
cfb6eeb4 1625 tcp_v4_send_reset(rsk, skb);
1da177e4
LT
1626discard:
1627 kfree_skb(skb);
1628 /* Be careful here. If this function gets more complicated and
1629 * gcc suffers from register pressure on the x86, sk (in %ebx)
1630 * might be destroyed here. This current version compiles correctly,
1631 * but you have been warned.
1632 */
1633 return 0;
1634
1635csum_err:
63231bdd 1636 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1637 goto discard;
1638}
4bc2f18b 1639EXPORT_SYMBOL(tcp_v4_do_rcv);
1da177e4
LT
1640
1641/*
1642 * From tcp_input.c
1643 */
1644
1645int tcp_v4_rcv(struct sk_buff *skb)
1646{
eddc9ec5 1647 const struct iphdr *iph;
cf533ea5 1648 const struct tcphdr *th;
1da177e4
LT
1649 struct sock *sk;
1650 int ret;
a86b1e30 1651 struct net *net = dev_net(skb->dev);
1da177e4
LT
1652
1653 if (skb->pkt_type != PACKET_HOST)
1654 goto discard_it;
1655
1656 /* Count it even if it's bad */
63231bdd 1657 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1658
1659 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1660 goto discard_it;
1661
aa8223c7 1662 th = tcp_hdr(skb);
1da177e4
LT
1663
1664 if (th->doff < sizeof(struct tcphdr) / 4)
1665 goto bad_packet;
1666 if (!pskb_may_pull(skb, th->doff * 4))
1667 goto discard_it;
1668
1669 /* An explanation is required here, I think.
1670 * Packet length and doff are validated by header prediction,
caa20d9a 1671 * provided case of th->doff==0 is eliminated.
1da177e4 1672 * So, we defer the checks. */
60476372 1673 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1da177e4
LT
1674 goto bad_packet;
1675
aa8223c7 1676 th = tcp_hdr(skb);
eddc9ec5 1677 iph = ip_hdr(skb);
1da177e4
LT
1678 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1679 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1680 skb->len - th->doff * 4);
1681 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1682 TCP_SKB_CB(skb)->when = 0;
b82d1bb4 1683 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1da177e4
LT
1684 TCP_SKB_CB(skb)->sacked = 0;
1685
9a1f27c4 1686 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1687 if (!sk)
1688 goto no_tcp_socket;
1689
bb134d5d
ED
1690process:
1691 if (sk->sk_state == TCP_TIME_WAIT)
1692 goto do_time_wait;
1693
6cce09f8
ED
1694 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1695 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1696 goto discard_and_relse;
6cce09f8 1697 }
d218d111 1698
1da177e4
LT
1699 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1700 goto discard_and_relse;
b59c2701 1701 nf_reset(skb);
1da177e4 1702
fda9ef5d 1703 if (sk_filter(sk, skb))
1da177e4
LT
1704 goto discard_and_relse;
1705
1706 skb->dev = NULL;
1707
	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

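	/* TIME_WAIT: tcp_timewait_state_process() decides whether the
	 * segment is a valid new SYN (TCP_TW_SYN, re-dispatched to a
	 * listener), wants the last ACK retransmitted (TCP_TW_ACK),
	 * deserves a reset (TCP_TW_RST), or should simply be dropped
	 * (TCP_TW_SUCCESS).
	 */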
do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

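/* Look up the inet_peer entry for this connection's remote address.  The
 * peer cache holds long-lived per-destination state (such as the
 * timestamps consulted by tcp_tw_recycle).  When the cached route already
 * has a bound peer we borrow it; otherwise a referenced entry is returned
 * and *release_it tells the caller to drop that reference.
 */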
struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
{
	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
		peer = inet_getpeer_v4(inet->inet_daddr, 1);
		*release_it = true;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, inet->inet_daddr, 1);
		peer = rt->peer;
		*release_it = false;
	}

	return peer;
}
EXPORT_SYMBOL(tcp_v4_get_peer);

void *tcp_v4_tw_get_peer(struct sock *sk)
{
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	return inet_getpeer_v4(tw->tw_daddr, 1);
}
EXPORT_SYMBOL(tcp_v4_tw_get_peer);

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v4_tw_get_peer,
};

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_add	= tcp_v4_md5_add_func,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}

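/* Inverse of tcp_v4_init_sock() plus whatever the connection accumulated:
 * timers, transmit and out-of-order queues, congestion state, MD5 keys and
 * the bind-bucket reference.  Invoked through tcp_prot.destroy.
 */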
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean the prequeue; it really must be empty by now. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
	       list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

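/* The seq_file iterator below walks three logical tables in order, tracked
 * by st->state: listening sockets (descending into each listener's pending
 * open requests), then the established hash, then its TIME_WAIT chains.
 */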
/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get the first established socket, starting from the bucket given in
 * st->bucket.  If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

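/* Resume optimisation for sequential readers: if the requested *pos is
 * exactly where the previous read stopped, restart from the saved bucket
 * and offset instead of rescanning both hash tables from the beginning.
 */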
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

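/* Illustrative only -- a made-up established-socket row of the kind
 * get_tcp4_sock() above emits (every row is padded to TMPSZ - 1 chars):
 *
 *   0: 0100007F:0016 0100007F:8E24 01 00000000:00000000 00:00000000 00000000  1000 0 12345 ...
 *
 * Addresses are the raw __be32 printed with %08X, so 127.0.0.1 appears as
 * 0100007F on a little-endian machine; ports are hex in host order.
 */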
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

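/* GRO hooks for IPv4 TCP.  On receive, the pseudo-header checksum is
 * verified (or the packet flushed) before tcp_gro_receive() attempts to
 * coalesce; on completion, th->check is rebuilt for the merged packet and
 * it is marked SKB_GSO_TCPV4 so it can be resegmented on transmit.
 */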
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

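/* The protocol descriptor gluing IPv4 TCP sockets to the generic socket
 * layer; registered from inet_init() at boot.  Note backlog_rcv: segments
 * queued while a user thread held the socket are replayed through
 * tcp_v4_do_rcv() when the lock is released.
 */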
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}