/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *	Craig I. Hagan		:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

int sysctl_tcp_min_tso_segs __read_mostly = 2;

int sysctl_tcp_autocorking __read_mostly = 1;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

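/* Editorial note (worked example, not part of the original source; the
 * units here are illustrative, the kernel passes jiffies values such as
 * TCP_RTO_MAX): the two helpers above are approximate inverses. With an
 * initial timeout of 1 and rto_max of 8, the per-retransmit periods
 * follow exponential backoff capped at rto_max: 1, 2, 4, 8, 8, ...
 * So retrans_to_secs(4, 1, 8) = 1 + 2 + 4 + 8 = 15, while
 * secs_to_retrans(15, 1, 8) walks the same series back up and returns 4.
 */
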
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);
	INIT_LIST_HEAD(&tp->tsq_node);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb)
{
	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

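/* Editorial sketch (user-space view, not kernel code; a minimal example
 * under the assumption that the socket supports software timestamps):
 * one way the tsflags consumed by tcp_tx_timestamp() get set. Enabling
 * SOF_TIMESTAMPING_TX_ACK via SO_TIMESTAMPING asks for a timestamp when
 * the peer ACKs the last byte of each send; reports are then read from
 * the socket error queue with recvmsg(..., MSG_ERRQUEUE).
 */
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_ack_timestamps(int tcp_fd)
{
	int val = SOF_TIMESTAMPING_TX_ACK | SOF_TIMESTAMPING_SOFTWARE;

	/* returns 0 on success, -1 with errno set on failure */
	return setsockopt(tcp_fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &val, sizeof(val));
}
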
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	int state;

	sock_rps_record_flow(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {	/* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (sk_stream_is_writeable(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	} else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
		/* Active TCP fastopen socket with defer_connect
		 * Return POLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= POLLOUT | POLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

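/* Editorial sketch (user-space view, not kernel code): how the ioctls
 * above are typically consumed. SIOCINQ reports unread bytes in the
 * receive queue; SIOCOUTQ reports bytes written but not yet ACKed by
 * the peer.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void report_queues(int tcp_fd)
{
	int inq = 0, outq = 0;

	if (ioctl(tcp_fd, SIOCINQ, &inq) == 0)
		printf("bytes waiting to be read: %d\n", inq);
	if (ioctl(tcp_fd, SIOCOUTQ, &outq) == 0)
		printf("bytes not yet ACKed by peer: %d\n", outq);
}
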
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	tcb->sacked = 0;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not-yet-filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure ACK (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed the ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       sysctl_tcp_autocorking &&
	       skb != tcp_write_queue_head(sk) &&
	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
}

static void tcp_push(struct sock *sk, int flags, int mss_now,
		     int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (!tcp_send_head(sk))
		return;

	skb = tcp_write_queue_tail(sk);
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}

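/* Editorial note (illustrative scenario, not part of the original
 * source): autocorking defers a small write when, say, a 300-byte skb
 * sits at the tail of the write queue while a previous data packet is
 * still in the Qdisc/NIC (sk_wmem_alloc > skb->truesize). Instead of
 * arming a timer, the socket is flagged TSQ_THROTTLED and the pending
 * TX completion triggers the actual transmit, giving later sendmsg()
 * calls a chance to fill the skb toward size_goal. The knob is
 * sysctl net.ipv4.tcp_autocorking (enabled by default, see above).
 */
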
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			sk_wait_data(sk, &timeo, NULL);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

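/* Editorial sketch (user-space view, hypothetical descriptors): the
 * path above services a splice() from a TCP socket into a pipe, the
 * usual zero-copy "socket -> pipe -> file/socket" relay.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t relay_some(int tcp_fd, int pipe_wr, size_t len)
{
	/* Moves up to len bytes from the socket into the pipe without
	 * copying the payload through user space.
	 */
	return splice(tcp_fd, NULL, pipe_wr, NULL, len, SPLICE_F_MOVE);
}
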
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	if (unlikely(tcp_under_memory_pressure(sk)))
		sk_mem_reclaim_partial(sk);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->reserved_tailroom = skb->end - skb->tail - size;
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed || !sk_can_gso(sk))
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

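/* Editorial note (worked example with hypothetical numbers, not part of
 * the original source): with mss_now = 1448 and sk_gso_max_size = 65536,
 * new_size_goal is roughly 65536 - 1 - MAX_TCP_HEADER, about 65200 bytes
 * depending on the kernel config. gso_segs then becomes 65200 / 1448 = 45
 * segments, so size_goal = 45 * 1448 = 65160 bytes, the largest
 * whole-MSS multiple that fits under the GSO limit. The "avoid divides"
 * test only recomputes gso_segs when the goal drifts by at least one MSS.
 */
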
static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
				size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		int copy, i;
		bool can_coalesce;

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0 ||
		    !tcp_skb_can_collapse_to(skb)) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
						  skb_queue_empty(&sk->sk_write_queue));
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= sysctl_max_skb_frags) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		size -= copy;
		if (!size)
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		tcp_push(sk, flags & ~MSG_MORE, mss_now,
			 TCP_NAGLE_PUSH, size_goal);

		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied) {
		tcp_tx_timestamp(sk, sk->sk_tsflags, tcp_write_queue_tail(sk));
		if (!(flags & MSG_SENDPAGE_NOTLAST))
			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	}
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
		     err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
	}
	return sk_stream_error(sk, flags, err);
}

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	ssize_t res;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !sk_check_csum_caps(sk))
		return sock_no_sendpage(sk->sk_socket, page, offset, size,
					flags);

	lock_sock(sk);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */

	res = do_tcp_sendpages(sk, page, offset, size, flags);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL(tcp_sendpage);

/* Do not bother using a page frag for very small frames.
 * But use this heuristic only for the first skb in write queue.
 *
 * Having no payload in skb->head allows better SACK shifting
 * in tcp_shift_skb_data(), reducing sack/rack overhead, because
 * write queue has fewer skbs.
 * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
 * This also speeds up tso_fragment(), since it won't fall back
 * to tcp_fragment().
 */
static int linear_payload_sz(bool first_skb)
{
	if (first_skb)
		return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
	return 0;
}

static int select_size(const struct sock *sk, bool sg, bool first_skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sg) {
		if (sk_can_gso(sk)) {
			tmp = linear_payload_sz(first_skb);
		} else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
				int *copied, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err, flags;

	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;

	if (inet->defer_connect) {
		err = tcp_connect(sk);
		/* Same failure procedure as in tcp_v4/6_connect */
		if (err) {
			tcp_set_state(sk, TCP_CLOSE);
			inet->inet_dport = 0;
			sk->sk_route_caps = 0;
		}
	}
	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
				    msg->msg_namelen, flags, 1);
	/* fastopen_req could already be freed in __inet_stream_connect
	 * if the connection times out or gets rst
	 */
	if (tp->fastopen_req) {
		*copied = tp->fastopen_req->copied;
		tcp_free_fastopen_req(tp);
		inet->defer_connect = 0;
	}
	return err;
}

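/* Editorial sketch (user-space view, hypothetical helper; assumes a
 * libc that exposes MSG_FASTOPEN): the usual way the fastopen path
 * above is entered is a client passing MSG_FASTOPEN to sendto() on an
 * unconnected socket, letting the kernel carry the payload in the SYN
 * once it holds a Fast Open cookie for the destination.
 */
#include <sys/socket.h>
#include <netinet/in.h>

static ssize_t tfo_send(int fd, const struct sockaddr_in *dst,
			const void *buf, size_t len)
{
	/* connects and sends in one call; falls back to a plain
	 * handshake when no cookie is cached yet
	 */
	return sendto(fd, buf, len, MSG_FASTOPEN,
		      (const struct sockaddr *)dst, sizeof(*dst));
}
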
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	struct sockcm_cookie sockc;
	int flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0;
	bool process_backlog = false;
	bool sg;
	long timeo;

	lock_sock(sk);

	flags = msg->msg_flags;
	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto do_error;
	}

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
			goto out_nopush;
		}

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out_err;

		/* 'common' sending to sendq */
	}

	sockc.tsflags = sk->sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err)) {
			err = -EINVAL;
			goto out_err;
		}
	}

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Ok commence sending. */
	copied = 0;

restart:
	mss_now = tcp_send_mss(sk, &size_goal, flags);

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	sg = !!(sk->sk_route_caps & NETIF_F_SG);

	while (msg_data_left(msg)) {
		int copy = 0;
		int max = size_goal;

		skb = tcp_write_queue_tail(sk);
		if (tcp_send_head(sk)) {
			if (skb->ip_summed == CHECKSUM_NONE)
				max = mss_now;
			copy = max - skb->len;
		}

		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
			bool first_skb;

new_segment:
			/* Allocate new segment. If the interface is SG,
			 * allocate skb fitting to single page.
			 */
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			if (process_backlog && sk_flush_backlog(sk)) {
				process_backlog = false;
				goto restart;
			}
			first_skb = skb_queue_empty(&sk->sk_write_queue);
			skb = sk_stream_alloc_skb(sk,
						  select_size(sk, sg, first_skb),
						  sk->sk_allocation,
						  first_skb);
			if (!skb)
				goto wait_for_memory;

			process_backlog = true;
			/*
			 * Check whether we can use HW checksum.
			 */
			if (sk_check_csum_caps(sk))
				skb->ip_summed = CHECKSUM_PARTIAL;

			skb_entail(sk, skb);
			copy = size_goal;
			max = size_goal;

			/* All packets are restored as if they have
			 * already been sent. skb_mstamp isn't set to
			 * avoid wrong rtt estimation.
			 */
			if (tp->repair)
				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
		}

		/* Try to append data to the end of skb. */
		if (copy > msg_data_left(msg))
			copy = msg_data_left(msg);

		/* Where to copy to? */
		if (skb_availroom(skb) > 0) {
			/* We have some space in skb head. Superb! */
			copy = min_t(int, copy, skb_availroom(skb));
			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
			if (err)
				goto do_fault;
		} else {
			bool merge = true;
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			if (!sk_page_frag_refill(sk, pfrag))
				goto wait_for_memory;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				if (i >= sysctl_max_skb_frags || !sg) {
					tcp_mark_push(tp, skb);
					goto new_segment;
				}
				merge = false;
			}

			copy = min_t(int, copy, pfrag->size - pfrag->offset);

			if (!sk_wmem_schedule(sk, copy))
				goto wait_for_memory;

			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
						       pfrag->page,
						       pfrag->offset,
						       copy);
			if (err)
				goto do_error;

			/* Update the skb. */
			if (merge) {
				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			} else {
				skb_fill_page_desc(skb, i, pfrag->page,
						   pfrag->offset, copy);
				page_ref_inc(pfrag->page);
			}
			pfrag->offset += copy;
		}

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		copied += copy;
		if (!msg_data_left(msg)) {
			if (unlikely(flags & MSG_EOR))
				TCP_SKB_CB(skb)->eor = 1;
			goto out;
		}

		if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now,
				 TCP_NAGLE_PUSH, size_goal);

		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied) {
		tcp_tx_timestamp(sk, sockc.tsflags, tcp_write_queue_tail(sk));
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	}
out_nopush:
	release_sock(sk);
	return copied + copied_syn;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied + copied_syn)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
		     err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
	}
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(tcp_sendmsg);

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_to_msg(msg, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
{
	struct sk_buff *skb;
	int copied = 0, err = 0;

	/* XXX -- need to support SO_PEEK_OFF */

	skb_queue_walk(&sk->sk_write_queue, skb) {
		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
		if (err)
			break;

		copied += skb->len;
	}

	return err ?: copied;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool time_to_ack = false;

	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = true;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = true;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

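/* Editorial note (worked example, not part of the original source):
 * suppose window_clamp is 64 KB and the currently advertised window has
 * shrunk to 16 KB. The guard 2 * 16 KB <= 64 KB passes, so
 * __tcp_select_window() is consulted; if it would now advertise at
 * least twice the current window, i.e. >= 32 KB, the read just freed
 * "lots" of space and an ACK carrying the window update is sent
 * immediately rather than waiting for the delayed-ACK timer.
 */
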
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
			pr_err_once("%s: found a SYN, please report !\n", __func__);
			offset--;
		}
		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
			*off = offset;
			return skb;
		}
		/* This looks weird, but this can happen if TCP collapsing
		 * split a fat GRO packet, while we released socket lock
		 * in skb_splice_bits()
		 */
		sk_eat_skb(sk, skb);
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/* If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq - 1, &offset);
			if (!skb)
				break;
			/* TCP coalescing might have appended data to the skb.
			 * Try to splice more frags
			 */
			if (offset + 1 != skb->len)
				continue;
		}
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
			sk_eat_skb(sk, skb);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb);
		if (!desc->count)
			break;
		tp->copied_seq = seq;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0) {
		tcp_recv_skb(sk, seq, &offset);
		tcp_cleanup_rbuf(sk, copied);
	}
	return copied;
}
EXPORT_SYMBOL(tcp_read_sock);

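/* Editorial sketch (hypothetical, not in-tree): the shape of a
 * recv_actor callback consumed by tcp_read_sock(); the in-tree user in
 * this file is tcp_splice_data_recv() above. This toy actor just counts
 * bytes until rd_desc->count is exhausted.
 */
static int example_byte_count_actor(read_descriptor_t *rd_desc,
				    struct sk_buff *skb,
				    unsigned int offset, size_t len)
{
	size_t take = min_t(size_t, len, rd_desc->count);

	rd_desc->count -= take;	/* tcp_read_sock() stops at count == 0 */
	return (int)take;	/* bytes consumed from this skb */
}
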
int tcp_peek_len(struct socket *sock)
{
	return tcp_inq(sock->sk);
}
EXPORT_SYMBOL(tcp_peek_len);

/*
 * This routine copies from a sock struct into the user buffer.
 *
 * Technical note: in 2.3 we work on _locked_ socket, so that
 * tricks with *seq access order and skb->users are not required.
 * Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	struct sk_buff *skb, *last;
	u32 urg_hole = 0;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
	    (sk->sk_state == TCP_ESTABLISHED))
		sk_busy_loop(sk, nonblock);

	lock_sock(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	if (unlikely(tp->repair)) {
		err = -EPERM;
		if (!(flags & MSG_PEEK))
			goto out;

		if (tp->repair_queue == TCP_SEND_QUEUE)
			goto recv_sndq;

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out;

		/* 'common' recv queue MSG_PEEK-ing */
	}

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		last = skb_peek_tail(&sk->sk_receive_queue);
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			last = skb;
			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
				 flags))
				break;

			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
				pr_err_once("%s: found a SYN, please report !\n", __func__);
				offset--;
			}
			if (offset < skb->len)
				goto found_ok_skb;
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
				goto found_fin_ok;
			WARN(!(flags & MSG_PEEK),
			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
		}

		/* Well, if we have backlog, try to process it now yet. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.msg = msg;
			}

			tp->ucopy.len = len;

			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
				!(flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * it is not empty. It is more elegant, but eats cycles,
1823 * unfortunately.
1824 */
b03efcfb 1825 if (!skb_queue_empty(&tp->ucopy.prequeue))
1da177e4
LT
1826 goto do_prequeue;
1827
1828 /* __ Set realtime policy in scheduler __ */
1829 }
1830
1831 if (copied >= target) {
1832 /* Do not sleep, just process backlog. */
1833 release_sock(sk);
1834 lock_sock(sk);
dfbafc99
SD
1835 } else {
1836 sk_wait_data(sk, &timeo, last);
1837 }
1da177e4
LT
1838
1839 if (user_recv) {
1840 int chunk;
1841
1842 /* __ Restore normal policy in scheduler __ */
1843
686a5624
YM
1844 chunk = len - tp->ucopy.len;
1845 if (chunk != 0) {
6aef70a8 1846 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1da177e4
LT
1847 len -= chunk;
1848 copied += chunk;
1849 }
1850
1851 if (tp->rcv_nxt == tp->copied_seq &&
b03efcfb 1852 !skb_queue_empty(&tp->ucopy.prequeue)) {
1da177e4
LT
1853do_prequeue:
1854 tcp_prequeue_process(sk);
1855
686a5624
YM
1856 chunk = len - tp->ucopy.len;
1857 if (chunk != 0) {
6aef70a8 1858 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1da177e4
LT
1859 len -= chunk;
1860 copied += chunk;
1861 }
1862 }
1863 }
77527313
IJ
1864 if ((flags & MSG_PEEK) &&
1865 (peek_seq - copied - urg_hole != tp->copied_seq)) {
e87cc472
JP
1866 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1867 current->comm,
1868 task_pid_nr(current));
1da177e4
LT
1869 peek_seq = tp->copied_seq;
1870 }
1871 continue;
1872
1873 found_ok_skb:
1874 /* Ok so how much can we use? */
1875 used = skb->len - offset;
1876 if (len < used)
1877 used = len;
1878
1879 /* Do we have urgent data here? */
1880 if (tp->urg_data) {
1881 u32 urg_offset = tp->urg_seq - *seq;
1882 if (urg_offset < used) {
1883 if (!urg_offset) {
1884 if (!sock_flag(sk, SOCK_URGINLINE)) {
1885 ++*seq;
77527313 1886 urg_hole++;
1da177e4
LT
1887 offset++;
1888 used--;
1889 if (!used)
1890 goto skip_copy;
1891 }
1892 } else
1893 used = urg_offset;
1894 }
1895 }
1896
1897 if (!(flags & MSG_TRUNC)) {
51f3d02b 1898 err = skb_copy_datagram_msg(skb, offset, msg, used);
7bced397
DW
1899 if (err) {
1900 /* Exception. Bailout! */
1901 if (!copied)
1902 copied = -EFAULT;
1903 break;
1da177e4
LT
1904 }
1905 }
1906
1907 *seq += used;
1908 copied += used;
1909 len -= used;
1910
1911 tcp_rcv_space_adjust(sk);
1912
1913skip_copy:
1914 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1915 tp->urg_data = 0;
9e412ba7 1916 tcp_fast_path_check(sk);
1da177e4
LT
1917 }
1918 if (used + offset < skb->len)
1919 continue;
1920
e11ecddf 1921 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1da177e4 1922 goto found_fin_ok;
7bced397
DW
1923 if (!(flags & MSG_PEEK))
1924 sk_eat_skb(sk, skb);
1da177e4
LT
1925 continue;
1926
1927 found_fin_ok:
1928 /* Process the FIN. */
1929 ++*seq;
7bced397
DW
1930 if (!(flags & MSG_PEEK))
1931 sk_eat_skb(sk, skb);
1da177e4
LT
1932 break;
1933 } while (len > 0);
1934
1935 if (user_recv) {
b03efcfb 1936 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1da177e4
LT
1937 int chunk;
1938
1939 tp->ucopy.len = copied > 0 ? len : 0;
1940
1941 tcp_prequeue_process(sk);
1942
1943 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
6aef70a8 1944 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1da177e4
LT
1945 len -= chunk;
1946 copied += chunk;
1947 }
1948 }
1949
1950 tp->ucopy.task = NULL;
1951 tp->ucopy.len = 0;
1952 }
1953
1954 /* According to UNIX98, msg_name/msg_namelen are ignored
1955 * on connected socket. I was just happy when found this 8) --ANK
1956 */
1957
1958 /* Clean up data we have read: This will do ACK frames. */
0e4b4992 1959 tcp_cleanup_rbuf(sk, copied);
1da177e4 1960
1da177e4
LT
1961 release_sock(sk);
1962 return copied;
1963
1964out:
1da177e4
LT
1965 release_sock(sk);
1966 return err;
1967
1968recv_urg:
377f0a08 1969 err = tcp_recv_urg(sk, msg, len, flags);
1da177e4 1970 goto out;
c0e88ff0
PE
1971
1972recv_sndq:
1973 err = tcp_peek_sndq(sk, msg, len);
1974 goto out;
1da177e4 1975}
4bc2f18b 1976EXPORT_SYMBOL(tcp_recvmsg);
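
/* A short, hedged usage note: tcp_recvmsg() is where a user-space
 * recv(2)/read(2) on a connected TCP socket ultimately lands.  The
 * "target" logic above means a plain recv() returns as soon as one
 * byte is available, while MSG_WAITALL raises the target to the full
 * buffer:
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	n = recv(fd, buf, sizeof(buf), 0);		// >= 1 byte
 *	n = recv(fd, buf, sizeof(buf), MSG_WAITALL);	// full buffer,
 *							// or EOF/error/signal
 *	n = recv(fd, buf, sizeof(buf), MSG_PEEK);	// look, don't consume
 *
 * (Illustration only; "fd" is assumed to be a connected TCP socket.)
 */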

void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk_state_store(sk, state);

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);
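
/* For orientation (hedged note, not from the original file): every TCP
 * state change funnels through tcp_set_state(), which keeps the SNMP
 * gauges coherent.  A successful active open does, in effect:
 *
 *	tcp_set_state(sk, TCP_ESTABLISHED);	// CurrEstab++
 *
 * and an abortive close of that same connection later does:
 *
 *	tcp_set_state(sk, TCP_CLOSE);		// EstabResets++, CurrEstab--
 *
 * which is what "netstat -s" reports as currently established
 * connections and connection resets.
 */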

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static const unsigned char new_state[16] = {
  /* current state:	   new state:		action:	*/
  [0 /* (Invalid) */]	= TCP_CLOSE,
  [TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  [TCP_SYN_SENT]	= TCP_CLOSE,
  [TCP_SYN_RECV]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  [TCP_FIN_WAIT1]	= TCP_FIN_WAIT1,
  [TCP_FIN_WAIT2]	= TCP_FIN_WAIT2,
  [TCP_TIME_WAIT]	= TCP_CLOSE,
  [TCP_CLOSE]		= TCP_CLOSE,
  [TCP_CLOSE_WAIT]	= TCP_LAST_ACK | TCP_ACTION_FIN,
  [TCP_LAST_ACK]	= TCP_LAST_ACK,
  [TCP_LISTEN]		= TCP_CLOSE,
  [TCP_CLOSING]		= TCP_CLOSING,
  [TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen! */
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
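
/* Worked example (illustrative): with the table above, closing an
 * ESTABLISHED socket evaluates
 *
 *	next = TCP_FIN_WAIT1 | TCP_ACTION_FIN;
 *	ns   = next & TCP_STATE_MASK;		// TCP_FIN_WAIT1
 *	return next & TCP_ACTION_FIN;		// non-zero: send a FIN
 *
 * so the caller moves the socket to FIN-WAIT-1 and transmits a FIN,
 * while closing from SYN_SENT yields TCP_CLOSE with no FIN at all.
 */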

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't shut down the receiving side or
 *	sock_set_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/* We need to grab some memory, and put together a FIN,
	 * and then put it into the queue to be sent.
	 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
EXPORT_SYMBOL(tcp_shutdown);
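
/* Hedged usage sketch: this is the backend of shutdown(2) on TCP.  A
 * half-close from user space,
 *
 *	shutdown(fd, SHUT_WR);	// send FIN, keep reading
 *
 * reaches here with SEND_SHUTDOWN set and queues a FIN via
 * tcp_close_state()/tcp_send_fin(), while SHUT_RD never enters the
 * conditional above.  ("fd" is assumed to be a connected TCP socket.)
 */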

bool tcp_check_oom(struct sock *sk, int shift)
{
	bool too_many_orphans, out_of_socket_memory;

	too_many_orphans = tcp_too_many_orphans(sk, shift);
	out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans)
		net_info_ratelimited("too many orphaned sockets\n");
	if (out_of_socket_memory)
		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
	return too_many_orphans || out_of_socket_memory;
}

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/* We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;

		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			len--;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client, wheee...
	 * Note: timeout is always zero in such a case.
	 */
	if (unlikely(tcp_sk(sk)->repair)) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken the TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when the FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. the "RFC state" is ESTABLISHED,
		 * if the Linux state is FIN-WAIT-1, but the FIN is still not sent.
		 *
		 * The visible consequences are that we sometimes enter
		 * time-wait state when it is not really required (harmless),
		 * and do not send active resets when the specs require them
		 * (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when they look like
		 * CLOSING or LAST_ACK to Linux).
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
		 * in a single packet! (May consider it later but will
		 * probably need API support or TCP_CORK SYN-ACK until
		 * data is written and socket is closed.)
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);

	/* Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/* This is a (useful) BSD violation of the RFC. There is a
	 * problem with TCP as specified in that the other end could
	 * keep a socket open forever with no application left at this end.
	 * We use a 1 minute timeout (about the same as BSD) then kill
	 * our end. If they send after that then tough - BUT: long enough
	 * that we won't make the old 4*rto = almost no time - whoops
	 * reset mistake.
	 *
	 * Nope, it was not a mistake. It is really desired behaviour,
	 * f.e. on http servers, where such sockets are useless but
	 * consume significant resources. Let's handle it with the special
	 * linger2 option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		/* We could get here with a non-NULL req if the socket is
		 * aborted (e.g., closed with unread data) before 3WHS
		 * finishes.
		 */
		if (req)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
EXPORT_SYMBOL(tcp_close);
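
/* Hedged illustration of the zero-linger branch above: from user space,
 * enabling SO_LINGER with a zero timeout makes close(2) abort the
 * connection with a RST instead of the normal FIN handshake:
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);	// sk_lingertime == 0 -> disconnect + ABORTONDATA
 *
 * (Example only; the exact counter accounting follows the code above.)
 */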

/* These states need RST on ABORT according to RFC793 */

static inline bool tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (unlikely(tp->repair)) {
		sk->sk_err = ECONNABORTED;
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for the discrepancy of Linux wrt.
		 * the RFC states.
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_write_queue_purge(sk);
	skb_rbtree_purge(&tp->out_of_order_queue);

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt_us = 0;
	tp->write_seq += tp->max_window + 2;
	if (tp->write_seq == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_cnt = 0;
	tp->window_clamp = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
	__sk_dst_reset(sk);

	/* Clean up fastopen related fields */
	tcp_free_fastopen_req(tp);
	inet->defer_connect = 0;

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
EXPORT_SYMBOL(tcp_disconnect);
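
/* Hedged note: user space reaches tcp_disconnect() by "dissolving" the
 * association with connect(2) and AF_UNSPEC, e.g.:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// abort/reset, keep the fd reusable
 *
 * which resets sequence state, congestion state and timers as above.
 */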

static inline bool tcp_can_repair_sock(const struct sock *sk)
{
	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
		(sk->sk_state != TCP_LISTEN);
}

static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
{
	struct tcp_repair_window opt;

	if (!tp->repair)
		return -EPERM;

	if (len != sizeof(opt))
		return -EINVAL;

	if (copy_from_user(&opt, optbuf, sizeof(opt)))
		return -EFAULT;

	if (opt.max_window < opt.snd_wnd)
		return -EINVAL;

	if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
		return -EINVAL;

	if (after(opt.rcv_wup, tp->rcv_nxt))
		return -EINVAL;

	tp->snd_wl1 = opt.snd_wl1;
	tp->snd_wnd = opt.snd_wnd;
	tp->max_window = opt.max_window;

	tp->rcv_wnd = opt.rcv_wnd;
	tp->rcv_wup = opt.rcv_wup;

	return 0;
}
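
/* Hedged sketch of the checkpoint/restore flow this serves (CRIU-style
 * tools): with the socket already switched into repair mode, window
 * state is restored wholesale from a saved struct tcp_repair_window:
 *
 *	struct tcp_repair_window w = saved_window_state;
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, &w, sizeof(w));
 *
 * ("saved_window_state" is a placeholder; the validation above rejects
 * inconsistent snd/rcv window values.)
 */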

static int tcp_repair_options_est(struct tcp_sock *tp,
		struct tcp_repair_opt __user *optbuf, unsigned int len)
{
	struct tcp_repair_opt opt;

	while (len >= sizeof(opt)) {
		if (copy_from_user(&opt, optbuf, sizeof(opt)))
			return -EFAULT;

		optbuf++;
		len -= sizeof(opt);

		switch (opt.opt_code) {
		case TCPOPT_MSS:
			tp->rx_opt.mss_clamp = opt.opt_val;
			break;
		case TCPOPT_WINDOW:
			{
				u16 snd_wscale = opt.opt_val & 0xFFFF;
				u16 rcv_wscale = opt.opt_val >> 16;

				if (snd_wscale > 14 || rcv_wscale > 14)
					return -EFBIG;

				tp->rx_opt.snd_wscale = snd_wscale;
				tp->rx_opt.rcv_wscale = rcv_wscale;
				tp->rx_opt.wscale_ok = 1;
			}
			break;
		case TCPOPT_SACK_PERM:
			if (opt.opt_val != 0)
				return -EINVAL;

			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
			if (sysctl_tcp_fack)
				tcp_enable_fack(tp);
			break;
		case TCPOPT_TIMESTAMP:
			if (opt.opt_val != 0)
				return -EINVAL;

			tp->rx_opt.tstamp_ok = 1;
			break;
		}
	}

	return 0;
}
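
/* Hedged example of the option stream this parser expects: an array of
 * (opt_code, opt_val) pairs, restored onto an ESTABLISHED repair-mode
 * socket.  For window scaling the two shift counts are packed into one
 * value, matching the unpacking above:
 *
 *	struct tcp_repair_opt opts[] = {
 *		{ TCPOPT_MSS,		1460 },
 *		{ TCPOPT_WINDOW,	7 | (7 << 16) },  // snd | rcv << 16
 *		{ TCPOPT_SACK_PERM,	0 },
 *		{ TCPOPT_TIMESTAMP,	0 },
 *	};
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS, opts, sizeof(opts));
 */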

/*
 *	Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct net *net = sock_net(sk);
	int val;
	int err = 0;

	/* These are data/string values, all the others are ints */
	switch (optname) {
	case TCP_CONGESTION: {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min_t(long, TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}
	default:
		/* fallthru */
		break;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than the interface MTU won't take effect.
		 * However, at the point when this call is made we typically
		 * don't yet know which interface is going to be used.
		 */
		if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on a corked socket is remembered, but
			 * it is not activated until the cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_THIN_LINEAR_TIMEOUTS:
		if (val < 0 || val > 1)
			err = -EINVAL;
		else
			tp->thin_lto = val;
		break;

	case TCP_THIN_DUPACK:
		if (val < 0 || val > 1)
			err = -EINVAL;
		break;

	case TCP_REPAIR:
		if (!tcp_can_repair_sock(sk))
			err = -EPERM;
		else if (val == 1) {
			tp->repair = 1;
			sk->sk_reuse = SK_FORCE_REUSE;
			tp->repair_queue = TCP_NO_QUEUE;
		} else if (val == 0) {
			tp->repair = 0;
			sk->sk_reuse = SK_NO_REUSE;
			tcp_send_window_probe(sk);
		} else
			err = -EINVAL;

		break;

	case TCP_REPAIR_QUEUE:
		if (!tp->repair)
			err = -EPERM;
		else if (val < TCP_QUEUES_NR)
			tp->repair_queue = val;
		else
			err = -EINVAL;
		break;

	case TCP_QUEUE_SEQ:
		if (sk->sk_state != TCP_CLOSE)
			err = -EPERM;
		else if (tp->repair_queue == TCP_SEND_QUEUE)
			tp->write_seq = val;
		else if (tp->repair_queue == TCP_RECV_QUEUE)
			tp->rcv_nxt = val;
		else
			err = -EINVAL;
		break;

	case TCP_REPAIR_OPTIONS:
		if (!tp->repair)
			err = -EINVAL;
		else if (sk->sk_state == TCP_ESTABLISHED)
			err = tcp_repair_options_est(tp,
					(struct tcp_repair_opt __user *)optval,
					optlen);
		else
			err = -EPERM;
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				u32 elapsed = keepalive_time_elapsed(tp);
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_SAVE_SYN:
		if (val < 0 || val > 1)
			err = -EINVAL;
		else
			tp->save_syn = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		/* Translate value in seconds to number of retransmits */
		icsk->icsk_accept_queue.rskq_defer_accept =
			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
					TCP_RTO_MAX / HZ);
		break;

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
		/* Read the IP->Key mappings from userspace */
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		break;
#endif
	case TCP_USER_TIMEOUT:
		/* Cap the max time in ms TCP will retry or probe the window
		 * before giving up and aborting (ETIMEDOUT) a connection.
		 */
		if (val < 0)
			err = -EINVAL;
		else
			icsk->icsk_user_timeout = msecs_to_jiffies(val);
		break;

	case TCP_FASTOPEN:
		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
		    TCPF_LISTEN))) {
			tcp_fastopen_init_key_once(true);

			fastopen_queue_tune(sk, val);
		} else {
			err = -EINVAL;
		}
		break;
	case TCP_FASTOPEN_CONNECT:
		if (val > 1 || val < 0) {
			err = -EINVAL;
		} else if (sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
			if (sk->sk_state == TCP_CLOSE)
				tp->fastopen_connect = val;
			else
				err = -EINVAL;
		} else {
			err = -EOPNOTSUPP;
		}
		break;
	case TCP_TIMESTAMP:
		if (!tp->repair)
			err = -EPERM;
		else
			tp->tsoffset = val - tcp_time_stamp;
		break;
	case TCP_REPAIR_WINDOW:
		err = tcp_repair_set_window(tp, optval, optlen);
		break;
	case TCP_NOTSENT_LOWAT:
		tp->notsent_lowat = val;
		sk->sk_write_space(sk);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_setsockopt);
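
/* Hedged usage sketch for the ints-only path above: most TCP options
 * are plain integers, e.g. disabling Nagle and tightening keepalive:
 *
 *	int one = 1, idle = 60;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	// string-valued TCP_CONGESTION takes a ca name instead:
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", 5);
 *
 * (Keepalive still needs SO_KEEPALIVE at SOL_SOCKET to be armed.)
 */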

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif

static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
				      struct tcp_info *info)
{
	u64 stats[__TCP_CHRONO_MAX], total = 0;
	enum tcp_chrono i;

	for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
		stats[i] = tp->chrono_stat[i - 1];
		if (i == tp->chrono_type)
			stats[i] += tcp_time_stamp - tp->chrono_start;
		stats[i] *= USEC_PER_SEC / HZ;
		total += stats[i];
	}

	info->tcpi_busy_time = total;
	info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
	info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
}

/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now, intv;
	u64 rate64;
	bool slow;
	u32 rate;

	memset(info, 0, sizeof(*info));
	if (sk->sk_type != SOCK_STREAM)
		return;

	info->tcpi_state = sk_state_load(sk);

	/* Report meaningful fields for all TCP states, including listeners */
	rate = READ_ONCE(sk->sk_pacing_rate);
	rate64 = rate != ~0U ? rate : ~0ULL;
	info->tcpi_pacing_rate = rate64;

	rate = READ_ONCE(sk->sk_max_pacing_rate);
	rate64 = rate != ~0U ? rate : ~0ULL;
	info->tcpi_max_pacing_rate = rate64;

	info->tcpi_reordering = tp->reordering;
	info->tcpi_snd_cwnd = tp->snd_cwnd;

	if (info->tcpi_state == TCP_LISTEN) {
		/* listener aliased fields:
		 * tcpi_unacked -> Number of children ready for accept()
		 * tcpi_sacked  -> max backlog
		 */
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
		return;
	}

	slow = lock_sock_fast(sk);

	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;
	if (tp->ecn_flags & TCP_ECN_SEEN)
		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
	if (tp->syn_data_acked)
		info->tcpi_options |= TCPI_OPT_SYN_DATA;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;

	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	now = tcp_time_stamp;
	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = tp->srtt_us >> 3;
	info->tcpi_rttvar = tp->mdev_us >> 2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_advmss = tp->advmss;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;

	info->tcpi_bytes_acked = tp->bytes_acked;
	info->tcpi_bytes_received = tp->bytes_received;
	info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
	tcp_get_info_chrono_stats(tp, info);

	info->tcpi_segs_out = tp->segs_out;
	info->tcpi_segs_in = tp->segs_in;

	info->tcpi_min_rtt = tcp_min_rtt(tp);
	info->tcpi_data_segs_in = tp->data_segs_in;
	info->tcpi_data_segs_out = tp->data_segs_out;

	info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
	rate = READ_ONCE(tp->rate_delivered);
	intv = READ_ONCE(tp->rate_interval_us);
	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
		info->tcpi_delivery_rate = rate64;
	}
	unlock_sock_fast(sk, slow);
}
EXPORT_SYMBOL_GPL(tcp_get_info);
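
/* Hedged usage note: this is the backend of getsockopt(TCP_INFO), the
 * same data "ss -i" displays.  From user space:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %uus cwnd %u retrans %u\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 *
 * (Sketch only; tcpi_rtt is in microseconds, cwnd in segments.)
 */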

struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *stats;
	struct tcp_info info;

	stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
	if (!stats)
		return NULL;

	tcp_get_info_chrono_stats(tp, &info);
	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
			  info.tcpi_busy_time, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
			  info.tcpi_rwnd_limited, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
			  info.tcpi_sndbuf_limited, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
			  tp->data_segs_out, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
			  tp->total_retrans, TCP_NLA_PAD);
	return stats;
}

static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		if (tp->repair)
			val = tp->rx_opt.mss_clamp;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = keepalive_time_when(tp) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = keepalive_intvl_when(tp) / HZ;
		break;
	case TCP_KEEPCNT:
		val = keepalive_probes(tp);
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_CC_INFO: {
		const struct tcp_congestion_ops *ca_ops;
		union tcp_cc_info info;
		size_t sz = 0;
		int attr;

		if (get_user(len, optlen))
			return -EFAULT;

		ca_ops = icsk->icsk_ca_ops;
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ~0U, &attr, &info);

		len = min_t(unsigned int, len, sz);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;

	case TCP_THIN_LINEAR_TIMEOUTS:
		val = tp->thin_lto;
		break;

	case TCP_THIN_DUPACK:
		val = 0;
		break;

	case TCP_REPAIR:
		val = tp->repair;
		break;

	case TCP_REPAIR_QUEUE:
		if (tp->repair)
			val = tp->repair_queue;
		else
			return -EINVAL;
		break;

	case TCP_REPAIR_WINDOW: {
		struct tcp_repair_window opt;

		if (get_user(len, optlen))
			return -EFAULT;

		if (len != sizeof(opt))
			return -EINVAL;

		if (!tp->repair)
			return -EPERM;

		opt.snd_wl1 = tp->snd_wl1;
		opt.snd_wnd = tp->snd_wnd;
		opt.max_window = tp->max_window;
		opt.rcv_wnd = tp->rcv_wnd;
		opt.rcv_wup = tp->rcv_wup;

		if (copy_to_user(optval, &opt, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUEUE_SEQ:
		if (tp->repair_queue == TCP_SEND_QUEUE)
			val = tp->write_seq;
		else if (tp->repair_queue == TCP_RECV_QUEUE)
			val = tp->rcv_nxt;
		else
			return -EINVAL;
		break;

	case TCP_USER_TIMEOUT:
		val = jiffies_to_msecs(icsk->icsk_user_timeout);
		break;

	case TCP_FASTOPEN:
		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
		break;

	case TCP_FASTOPEN_CONNECT:
		val = tp->fastopen_connect;
		break;

	case TCP_TIMESTAMP:
		val = tcp_time_stamp + tp->tsoffset;
		break;
	case TCP_NOTSENT_LOWAT:
		val = tp->notsent_lowat;
		break;
	case TCP_SAVE_SYN:
		val = tp->save_syn;
		break;
	case TCP_SAVED_SYN: {
		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		if (tp->saved_syn) {
			if (len < tp->saved_syn[0]) {
				if (put_user(tp->saved_syn[0], optlen)) {
					release_sock(sk);
					return -EFAULT;
				}
				release_sock(sk);
				return -EINVAL;
			}
			len = tp->saved_syn[0];
			if (put_user(len, optlen)) {
				release_sock(sk);
				return -EFAULT;
			}
			if (copy_to_user(optval, tp->saved_syn + 1, len)) {
				release_sock(sk);
				return -EFAULT;
			}
			tcp_saved_syn_free(tp);
			release_sock(sk);
		} else {
			release_sock(sk);
			len = 0;
			if (put_user(len, optlen))
				return -EFAULT;
		}
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

#ifdef CONFIG_TCP_MD5SIG
static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
static DEFINE_MUTEX(tcp_md5sig_mutex);
static bool tcp_md5sig_pool_populated = false;

static void __tcp_alloc_md5sig_pool(void)
{
	struct crypto_ahash *hash;
	int cpu;

	hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash))
		return;

	for_each_possible_cpu(cpu) {
		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
		struct ahash_request *req;

		if (!scratch) {
			scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
					       sizeof(struct tcphdr),
					       GFP_KERNEL,
					       cpu_to_node(cpu));
			if (!scratch)
				return;
			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
		}
		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
			continue;

		req = ahash_request_alloc(hash, GFP_KERNEL);
		if (!req)
			return;

		ahash_request_set_callback(req, 0, NULL, NULL);

		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
	}
	/* before setting tcp_md5sig_pool_populated, we must commit all writes
	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
	 */
	smp_wmb();
	tcp_md5sig_pool_populated = true;
}

bool tcp_alloc_md5sig_pool(void)
{
	if (unlikely(!tcp_md5sig_pool_populated)) {
		mutex_lock(&tcp_md5sig_mutex);

		if (!tcp_md5sig_pool_populated)
			__tcp_alloc_md5sig_pool();

		mutex_unlock(&tcp_md5sig_mutex);
	}
	return tcp_md5sig_pool_populated;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);


/**
 *	tcp_get_md5sig_pool - get md5sig_pool for this user
 *
 *	We use a percpu structure, so if we succeed, we exit with preemption
 *	and BH disabled, to make sure another thread or softirq handling
 *	won't try to get the same context.
 */
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	local_bh_disable();

	if (tcp_md5sig_pool_populated) {
		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
		smp_rmb();
		return this_cpu_ptr(&tcp_md5sig_pool);
	}
	local_bh_enable();
	return NULL;
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  const struct sk_buff *skb, unsigned int header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct ahash_request *req = hp->md5_req;
	unsigned int i;
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	struct sk_buff *frag_iter;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
	if (crypto_ahash_update(req))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const struct skb_frag_struct *f = &shi->frags[i];
		unsigned int offset = f->page_offset;
		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);

		sg_set_page(&sg, page, skb_frag_size(f),
			    offset_in_page(offset));
		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
		if (crypto_ahash_update(req))
			return 1;
	}

	skb_walk_frags(skb, frag_iter)
		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);

int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
	struct scatterlist sg;

	sg_init_one(&sg, key->key, key->keylen);
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
	return crypto_ahash_update(hp->md5_req);
}
EXPORT_SYMBOL(tcp_md5_hash_key);
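
/* Hedged configuration sketch: these helpers back the TCP_MD5SIG
 * socket option (RFC 2385 signatures, used mostly for BGP sessions).
 * A peer key is installed from user space roughly like:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *
 *	memcpy(&md5.tcpm_addr, &peer_addr, sizeof(peer_addr));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * ("peer_addr" is a placeholder struct sockaddr_in for the peer.)
 */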

#endif

void tcp_done(struct sock *sk)
{
	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;

	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);
	if (req)
		reqsk_fastopen_remove(sk, req, false);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

int tcp_abort(struct sock *sk, int err)
{
	if (!sk_fullsock(sk)) {
		if (sk->sk_state == TCP_NEW_SYN_RECV) {
			struct request_sock *req = inet_reqsk(sk);

			local_bh_disable();
			inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
							  req);
			local_bh_enable();
			return 0;
		}
		return -EOPNOTSUPP;
	}

	/* Don't race with userspace socket closes such as tcp_close. */
	lock_sock(sk);

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);
		inet_csk_listen_stop(sk);
	}

	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
	local_bh_disable();
	bh_lock_sock(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_err = err;
		/* This barrier is coupled with smp_rmb() in tcp_poll() */
		smp_wmb();
		sk->sk_error_report(sk);
		if (tcp_need_reset(sk->sk_state))
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}

	bh_unlock_sock(sk);
	local_bh_enable();
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_abort);
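
/* Hedged note: tcp_abort() is the kernel side of administrative socket
 * destruction via the inet_diag SOCK_DESTROY interface -- what
 * "ss -K" uses to kill a live connection -- injecting "err" (typically
 * ECONNABORTED) and sending an active reset when the state calls for one.
 */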

extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &thash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("thash_entries=", set_thash_entries);
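
/* Usage note (hedged): the __setup hook above consumes a kernel boot
 * parameter that overrides the auto-sized established hash table, e.g.
 * adding
 *
 *	thash_entries=131072
 *
 * to the kernel command line; tcp_init() below passes the value to
 * alloc_large_system_hash() in place of its memory-based default.
 */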

static void __init tcp_init_mem(void)
{
	unsigned long limit = nr_free_buffer_pages() / 16;

	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
}
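
/* Worked example (approximate): on a machine where
 * nr_free_buffer_pages() is about 1M 4 KiB pages (~4 GiB of low memory),
 * limit = 1M / 16 = 65536 pages, giving tcp_mem (in pages):
 *
 *	min      = 49152  (~192 MiB)
 *	pressure = 65536  (~256 MiB)
 *	max      = 98304  (~384 MiB)
 *
 * i.e. the 4.68 / 6.25 / 9.37 % of memory noted in the comments above.
 */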
3373
1da177e4
LT
3374void __init tcp_init(void)
3375{
b49960a0 3376 int max_rshare, max_wshare, cnt;
b2d3ea4a 3377 unsigned long limit;
074b8517 3378 unsigned int i;
1da177e4 3379
b2d3ea4a
ED
3380 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
3381 FIELD_SIZEOF(struct sk_buff, cb));
1da177e4 3382
908c7f19
TH
3383 percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
3384 percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
1946e672 3385 inet_hashinfo_init(&tcp_hashinfo);
6e04e021
ACM
3386 tcp_hashinfo.bind_bucket_cachep =
3387 kmem_cache_create("tcp_bind_bucket",
3388 sizeof(struct inet_bind_bucket), 0,
20c2df83 3389 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3390
1da177e4
LT
3391 /* Size and allocate the main established and bind bucket
3392 * hash tables.
3393 *
3394 * The methodology is similar to that of the buffer cache.
3395 */
6e04e021 3396 tcp_hashinfo.ehash =
1da177e4 3397 alloc_large_system_hash("TCP established",
0f7ff927 3398 sizeof(struct inet_ehash_bucket),
1da177e4 3399 thash_entries,
fd90b29d 3400 17, /* one slot per 128 KB of memory */
9e950efa 3401 0,
1da177e4 3402 NULL,
f373b53b 3403 &tcp_hashinfo.ehash_mask,
31fe62b9 3404 0,
0ccfe618 3405 thash_entries ? 0 : 512 * 1024);
05dbc7b5 3406 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3ab5aee7 3407 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
05dbc7b5 3408
230140cf
ED
3409 if (inet_ehash_locks_alloc(&tcp_hashinfo))
3410 panic("TCP: failed to alloc ehash_locks");
6e04e021 3411 tcp_hashinfo.bhash =
1da177e4 3412 alloc_large_system_hash("TCP bind",
0f7ff927 3413 sizeof(struct inet_bind_hashbucket),
f373b53b 3414 tcp_hashinfo.ehash_mask + 1,
fd90b29d 3415 17, /* one slot per 128 KB of memory */
9e950efa 3416 0,
6e04e021 3417 &tcp_hashinfo.bhash_size,
1da177e4 3418 NULL,
31fe62b9 3419 0,
1da177e4 3420 64 * 1024);
074b8517 3421 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
6e04e021
ACM
3422 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3423 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3424 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
1da177e4
LT
3425 }
3426
c5ed63d6
ED
3427
3428 cnt = tcp_hashinfo.ehash_mask + 1;
c5ed63d6 3429 sysctl_tcp_max_orphans = cnt / 2;
1da177e4 3430
a4fe34bf 3431 tcp_init_mem();
c43b874d 3432 /* Set per-socket limits to no more than 1/128 the pressure threshold */
5fb84b14 3433 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
b49960a0
ED
3434 max_wshare = min(4UL*1024*1024, limit);
3435 max_rshare = min(6UL*1024*1024, limit);
7b4f4b5e 3436
3ab224be 3437 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
7b4f4b5e 3438 sysctl_tcp_wmem[1] = 16*1024;
b49960a0 3439 sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
7b4f4b5e 3440
3ab224be 3441 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
7b4f4b5e 3442 sysctl_tcp_rmem[1] = 87380;
b49960a0 3443 sysctl_tcp_rmem[2] = max(87380, max_rshare);
1da177e4 3444
afd46503 3445 pr_info("Hash tables configured (established %u bind %u)\n",
058bd4d2 3446 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
317a76f9 3447
1946e672 3448 tcp_v4_init();
51c5d0c4 3449 tcp_metrics_init();
55d8694f 3450 BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
46d3ceab 3451 tcp_tasklet_init();
1da177e4 3452}