// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					get read.
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
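/* Illustration, assuming the RFC 793 semantics described above: typical
 * state walks through this machine.
 *
 *	Active close:      ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE
 *	Passive close:     ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 *	Simultaneous close: ESTABLISHED -> FIN_WAIT1 -> CLOSING -> TIME_WAIT -> CLOSE
 */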
#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/xfrm.h>

#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);
/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};
/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);
void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);
void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);
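/* Illustrative note (behaviour as sketched by the two helpers above): when
 * TCP-wide memory use crosses the pressure threshold, the entry path stores
 * the current jiffies value in tcp_memory_pressure and bumps
 * LINUX_MIB_TCPMEMORYPRESSURES; when pressure clears, the exit path reports
 * the time spent under pressure, in msecs, via
 * LINUX_MIB_TCPMEMORYPRESSURESCHRONO.
 */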
/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}
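/* Worked example (illustrative, assuming the usual doubling backoff and
 * timeouts expressed in seconds): with an initial timeout of 1s and
 * rto_max of 120s, the cumulative wait after n retransmits grows as
 * 1, 3, 7, 15, 31, 63, ... seconds, so seconds = 60 converts to 6
 * retransmits.
 */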
/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}
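/* Worked example (illustrative numbers): rate_delivered = 10 packets over
 * rate_interval_us = 10000 us with mss_cache = 1448 bytes gives
 * rate64 = 10 * 1448 * USEC_PER_SEC / 10000 = 1,448,000 bytes/sec.
 */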
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
	tcp_scaling_ratio_init(sk);

	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);
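/* Example (illustrative, using commonly seen defaults rather than values
 * guaranteed by this file): with
 *	net.ipv4.tcp_wmem = 4096 16384 4194304
 *	net.ipv4.tcp_rmem = 4096 131072 6291456
 * a freshly initialized socket starts with sk_sndbuf = 16384 and
 * sk_rcvbuf = 131072 bytes, and the initial congestion window is
 * TCP_INIT_CWND (10) segments per RFC 6928.
 */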
static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}
static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}
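/* Example (illustrative): an application that sets SO_RCVLOWAT to 64KB only
 * sees EPOLLIN from tcp_poll() once roughly that much data is queued, because
 * sock_rcvlowat() feeds the low-water mark in as 'target' here; memory
 * pressure on the receive side can still force an earlier wakeup.
 */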
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	u8 shutdown;
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
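/* Example (illustrative) masks a caller might observe from tcp_poll():
 *	peer sent FIN, data still queued:  EPOLLIN | EPOLLRDNORM | EPOLLRDHUP
 *	both directions shut down:         the above plus EPOLLHUP | EPOLLOUT | EPOLLWRNORM
 *	connect() still in SYN_SENT:       0 until the handshake completes
 *	(an async error additionally sets EPOLLERR)
 */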
int tcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool slow;
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	*karg = answ;
	return 0;
}
EXPORT_SYMBOL(tcp_ioctl);
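/* Example (illustrative) userspace usage of these ioctls:
 *
 *	int unread, unacked, unsent;
 *	ioctl(fd, SIOCINQ, &unread);	// aka FIONREAD: bytes available to read
 *	ioctl(fd, SIOCOUTQ, &unacked);	// aka TIOCOUTQ: bytes written but not yet acked
 *	ioctl(fd, SIOCOUTQNSD, &unsent);	// bytes queued but not yet sent
 */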
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
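/* Example (illustrative): with max_window = 64KB, forced_push() becomes true
 * once more than 32KB (max_window >> 1) has been queued beyond pushed_seq, so
 * the senders below mark PSH and push pending frames even if the application
 * asked for corking via MSG_MORE.
 */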
void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}
/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}
void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}
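/* Example (illustrative): a sender issuing many small write() calls back to
 * back may see them merged into fewer, larger segments while earlier data is
 * still sitting in the Qdisc/NIC queues; each time autocorking engages here,
 * the TCPAutoCorking counter (LINUX_MIB_TCPAUTOCORKING, visible in
 * /proc/net/netstat) is incremented.
 */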
739 static int tcp_splice_data_recv(read_descriptor_t
*rd_desc
, struct sk_buff
*skb
,
740 unsigned int offset
, size_t len
)
742 struct tcp_splice_state
*tss
= rd_desc
->arg
.data
;
745 ret
= skb_splice_bits(skb
, skb
->sk
, offset
, tss
->pipe
,
746 min(rd_desc
->count
, len
), tss
->flags
);
748 rd_desc
->count
-= ret
;
752 static int __tcp_splice_read(struct sock
*sk
, struct tcp_splice_state
*tss
)
754 /* Store TCP splice context information in read_descriptor_t. */
755 read_descriptor_t rd_desc
= {
760 return tcp_read_sock(sk
, &rd_desc
, tcp_splice_data_recv
);
764 * tcp_splice_read - splice data from TCP socket to a pipe
765 * @sock: socket to splice from
766 * @ppos: position (not valid)
767 * @pipe: pipe to splice to
768 * @len: number of bytes to splice
769 * @flags: splice modifier flags
772 * Will read pages from given socket and fill them into a pipe.
775 ssize_t
tcp_splice_read(struct socket
*sock
, loff_t
*ppos
,
776 struct pipe_inode_info
*pipe
, size_t len
,
779 struct sock
*sk
= sock
->sk
;
780 struct tcp_splice_state tss
= {
789 sock_rps_record_flow(sk
);
791 * We can't seek on a socket input
800 timeo
= sock_rcvtimeo(sk
, sock
->file
->f_flags
& O_NONBLOCK
);
802 ret
= __tcp_splice_read(sk
, &tss
);
808 if (sock_flag(sk
, SOCK_DONE
))
811 ret
= sock_error(sk
);
814 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
816 if (sk
->sk_state
== TCP_CLOSE
) {
818 * This occurs when user tries to read
819 * from never connected socket.
828 /* if __tcp_splice_read() got nothing while we have
829 * an skb in receive queue, we do not want to loop.
830 * This might happen with URG data.
832 if (!skb_queue_empty(&sk
->sk_receive_queue
))
834 sk_wait_data(sk
, &timeo
, NULL
);
835 if (signal_pending(current
)) {
836 ret
= sock_intr_errno(timeo
);
844 if (!tss
.len
|| !timeo
)
849 if (sk
->sk_err
|| sk
->sk_state
== TCP_CLOSE
||
850 (sk
->sk_shutdown
& RCV_SHUTDOWN
) ||
851 signal_pending(current
))
862 EXPORT_SYMBOL(tcp_splice_read
);
864 struct sk_buff
*tcp_stream_alloc_skb(struct sock
*sk
, gfp_t gfp
,
869 skb
= alloc_skb_fclone(MAX_TCP_HEADER
, gfp
);
873 skb
->truesize
= SKB_TRUESIZE(skb_end_offset(skb
));
874 if (force_schedule
) {
875 mem_scheduled
= true;
876 sk_forced_mem_schedule(sk
, skb
->truesize
);
878 mem_scheduled
= sk_wmem_schedule(sk
, skb
->truesize
);
880 if (likely(mem_scheduled
)) {
881 skb_reserve(skb
, MAX_TCP_HEADER
);
882 skb
->ip_summed
= CHECKSUM_PARTIAL
;
883 INIT_LIST_HEAD(&skb
->tcp_tsorted_anchor
);
888 sk
->sk_prot
->enter_memory_pressure(sk
);
889 sk_stream_moderate_sndbuf(sk
);
894 static unsigned int tcp_xmit_size_goal(struct sock
*sk
, u32 mss_now
,
897 struct tcp_sock
*tp
= tcp_sk(sk
);
898 u32 new_size_goal
, size_goal
;
903 /* Note : tcp_tso_autosize() will eventually split this later */
904 new_size_goal
= tcp_bound_to_half_wnd(tp
, sk
->sk_gso_max_size
);
906 /* We try hard to avoid divides here */
907 size_goal
= tp
->gso_segs
* mss_now
;
908 if (unlikely(new_size_goal
< size_goal
||
909 new_size_goal
>= size_goal
+ mss_now
)) {
910 tp
->gso_segs
= min_t(u16
, new_size_goal
/ mss_now
,
911 sk
->sk_gso_max_segs
);
912 size_goal
= tp
->gso_segs
* mss_now
;
915 return max(size_goal
, mss_now
);
918 int tcp_send_mss(struct sock
*sk
, int *size_goal
, int flags
)
922 mss_now
= tcp_current_mss(sk
);
923 *size_goal
= tcp_xmit_size_goal(sk
, mss_now
, !(flags
& MSG_OOB
));
928 /* In some cases, both sendmsg() could have added an skb to the write queue,
929 * but failed adding payload on it. We need to remove it to consume less
930 * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
933 void tcp_remove_empty_skb(struct sock
*sk
)
935 struct sk_buff
*skb
= tcp_write_queue_tail(sk
);
937 if (skb
&& TCP_SKB_CB(skb
)->seq
== TCP_SKB_CB(skb
)->end_seq
) {
938 tcp_unlink_write_queue(skb
, sk
);
939 if (tcp_write_queue_empty(sk
))
940 tcp_chrono_stop(sk
, TCP_CHRONO_BUSY
);
941 tcp_wmem_free_skb(sk
, skb
);
945 /* skb changing from pure zc to mixed, must charge zc */
946 static int tcp_downgrade_zcopy_pure(struct sock
*sk
, struct sk_buff
*skb
)
948 if (unlikely(skb_zcopy_pure(skb
))) {
949 u32 extra
= skb
->truesize
-
950 SKB_TRUESIZE(skb_end_offset(skb
));
952 if (!sk_wmem_schedule(sk
, extra
))
955 sk_mem_charge(sk
, extra
);
956 skb_shinfo(skb
)->flags
&= ~SKBFL_PURE_ZEROCOPY
;
962 int tcp_wmem_schedule(struct sock
*sk
, int copy
)
966 if (likely(sk_wmem_schedule(sk
, copy
)))
969 /* We could be in trouble if we have nothing queued.
970 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
971 * to guarantee some progress.
973 left
= sock_net(sk
)->ipv4
.sysctl_tcp_wmem
[0] - sk
->sk_wmem_queued
;
975 sk_forced_mem_schedule(sk
, min(left
, copy
));
976 return min(copy
, sk
->sk_forward_alloc
);
979 void tcp_free_fastopen_req(struct tcp_sock
*tp
)
981 if (tp
->fastopen_req
) {
982 kfree(tp
->fastopen_req
);
983 tp
->fastopen_req
= NULL
;
987 int tcp_sendmsg_fastopen(struct sock
*sk
, struct msghdr
*msg
, int *copied
,
988 size_t size
, struct ubuf_info
*uarg
)
990 struct tcp_sock
*tp
= tcp_sk(sk
);
991 struct inet_sock
*inet
= inet_sk(sk
);
992 struct sockaddr
*uaddr
= msg
->msg_name
;
995 if (!(READ_ONCE(sock_net(sk
)->ipv4
.sysctl_tcp_fastopen
) &
996 TFO_CLIENT_ENABLE
) ||
997 (uaddr
&& msg
->msg_namelen
>= sizeof(uaddr
->sa_family
) &&
998 uaddr
->sa_family
== AF_UNSPEC
))
1000 if (tp
->fastopen_req
)
1001 return -EALREADY
; /* Another Fast Open is in progress */
1003 tp
->fastopen_req
= kzalloc(sizeof(struct tcp_fastopen_request
),
1005 if (unlikely(!tp
->fastopen_req
))
1007 tp
->fastopen_req
->data
= msg
;
1008 tp
->fastopen_req
->size
= size
;
1009 tp
->fastopen_req
->uarg
= uarg
;
1011 if (inet_test_bit(DEFER_CONNECT
, sk
)) {
1012 err
= tcp_connect(sk
);
1013 /* Same failure procedure as in tcp_v4/6_connect */
1015 tcp_set_state(sk
, TCP_CLOSE
);
1016 inet
->inet_dport
= 0;
1017 sk
->sk_route_caps
= 0;
1020 flags
= (msg
->msg_flags
& MSG_DONTWAIT
) ? O_NONBLOCK
: 0;
1021 err
= __inet_stream_connect(sk
->sk_socket
, uaddr
,
1022 msg
->msg_namelen
, flags
, 1);
1023 /* fastopen_req could already be freed in __inet_stream_connect
1024 * if the connection times out or gets rst
1026 if (tp
->fastopen_req
) {
1027 *copied
= tp
->fastopen_req
->copied
;
1028 tcp_free_fastopen_req(tp
);
1029 inet_clear_bit(DEFER_CONNECT
, sk
);
1034 int tcp_sendmsg_locked(struct sock
*sk
, struct msghdr
*msg
, size_t size
)
1036 struct tcp_sock
*tp
= tcp_sk(sk
);
1037 struct ubuf_info
*uarg
= NULL
;
1038 struct sk_buff
*skb
;
1039 struct sockcm_cookie sockc
;
1040 int flags
, err
, copied
= 0;
1041 int mss_now
= 0, size_goal
, copied_syn
= 0;
1042 int process_backlog
= 0;
1046 flags
= msg
->msg_flags
;
1048 if ((flags
& MSG_ZEROCOPY
) && size
) {
1049 if (msg
->msg_ubuf
) {
1050 uarg
= msg
->msg_ubuf
;
1051 if (sk
->sk_route_caps
& NETIF_F_SG
)
1053 } else if (sock_flag(sk
, SOCK_ZEROCOPY
)) {
1054 skb
= tcp_write_queue_tail(sk
);
1055 uarg
= msg_zerocopy_realloc(sk
, size
, skb_zcopy(skb
));
1060 if (sk
->sk_route_caps
& NETIF_F_SG
)
1063 uarg_to_msgzc(uarg
)->zerocopy
= 0;
1065 } else if (unlikely(msg
->msg_flags
& MSG_SPLICE_PAGES
) && size
) {
1066 if (sk
->sk_route_caps
& NETIF_F_SG
)
1067 zc
= MSG_SPLICE_PAGES
;
1070 if (unlikely(flags
& MSG_FASTOPEN
||
1071 inet_test_bit(DEFER_CONNECT
, sk
)) &&
1073 err
= tcp_sendmsg_fastopen(sk
, msg
, &copied_syn
, size
, uarg
);
1074 if (err
== -EINPROGRESS
&& copied_syn
> 0)
1080 timeo
= sock_sndtimeo(sk
, flags
& MSG_DONTWAIT
);
1082 tcp_rate_check_app_limited(sk
); /* is sending application-limited? */
1084 /* Wait for a connection to finish. One exception is TCP Fast Open
1085 * (passive side) where data is allowed to be sent before a connection
1086 * is fully established.
1088 if (((1 << sk
->sk_state
) & ~(TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
)) &&
1089 !tcp_passive_fastopen(sk
)) {
1090 err
= sk_stream_wait_connect(sk
, &timeo
);
1095 if (unlikely(tp
->repair
)) {
1096 if (tp
->repair_queue
== TCP_RECV_QUEUE
) {
1097 copied
= tcp_send_rcvq(sk
, msg
, size
);
1102 if (tp
->repair_queue
== TCP_NO_QUEUE
)
1105 /* 'common' sending to sendq */
1108 sockcm_init(&sockc
, sk
);
1109 if (msg
->msg_controllen
) {
1110 err
= sock_cmsg_send(sk
, msg
, &sockc
);
1111 if (unlikely(err
)) {
1117 /* This should be in poll */
1118 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE
, sk
);
1120 /* Ok commence sending. */
1124 mss_now
= tcp_send_mss(sk
, &size_goal
, flags
);
1127 if (sk
->sk_err
|| (sk
->sk_shutdown
& SEND_SHUTDOWN
))
1130 while (msg_data_left(msg
)) {
1133 skb
= tcp_write_queue_tail(sk
);
1135 copy
= size_goal
- skb
->len
;
1137 if (copy
<= 0 || !tcp_skb_can_collapse_to(skb
)) {
1141 if (!sk_stream_memory_free(sk
))
1142 goto wait_for_space
;
1144 if (unlikely(process_backlog
>= 16)) {
1145 process_backlog
= 0;
1146 if (sk_flush_backlog(sk
))
1149 first_skb
= tcp_rtx_and_write_queues_empty(sk
);
1150 skb
= tcp_stream_alloc_skb(sk
, sk
->sk_allocation
,
1153 goto wait_for_space
;
1157 tcp_skb_entail(sk
, skb
);
1160 /* All packets are restored as if they have
1161 * already been sent. skb_mstamp_ns isn't set to
1162 * avoid wrong rtt estimation.
1165 TCP_SKB_CB(skb
)->sacked
|= TCPCB_REPAIRED
;
1168 /* Try to append data to the end of skb. */
1169 if (copy
> msg_data_left(msg
))
1170 copy
= msg_data_left(msg
);
1174 int i
= skb_shinfo(skb
)->nr_frags
;
1175 struct page_frag
*pfrag
= sk_page_frag(sk
);
1177 if (!sk_page_frag_refill(sk
, pfrag
))
1178 goto wait_for_space
;
1180 if (!skb_can_coalesce(skb
, i
, pfrag
->page
,
1182 if (i
>= READ_ONCE(sysctl_max_skb_frags
)) {
1183 tcp_mark_push(tp
, skb
);
1189 copy
= min_t(int, copy
, pfrag
->size
- pfrag
->offset
);
1191 if (unlikely(skb_zcopy_pure(skb
) || skb_zcopy_managed(skb
))) {
1192 if (tcp_downgrade_zcopy_pure(sk
, skb
))
1193 goto wait_for_space
;
1194 skb_zcopy_downgrade_managed(skb
);
1197 copy
= tcp_wmem_schedule(sk
, copy
);
1199 goto wait_for_space
;
1201 err
= skb_copy_to_page_nocache(sk
, &msg
->msg_iter
, skb
,
1208 /* Update the skb. */
1210 skb_frag_size_add(&skb_shinfo(skb
)->frags
[i
- 1], copy
);
1212 skb_fill_page_desc(skb
, i
, pfrag
->page
,
1213 pfrag
->offset
, copy
);
1214 page_ref_inc(pfrag
->page
);
1216 pfrag
->offset
+= copy
;
1217 } else if (zc
== MSG_ZEROCOPY
) {
1218 /* First append to a fragless skb builds initial
1222 skb_shinfo(skb
)->flags
|= SKBFL_PURE_ZEROCOPY
;
1224 if (!skb_zcopy_pure(skb
)) {
1225 copy
= tcp_wmem_schedule(sk
, copy
);
1227 goto wait_for_space
;
1230 err
= skb_zerocopy_iter_stream(sk
, skb
, msg
, copy
, uarg
);
1231 if (err
== -EMSGSIZE
|| err
== -EEXIST
) {
1232 tcp_mark_push(tp
, skb
);
1238 } else if (zc
== MSG_SPLICE_PAGES
) {
1239 /* Splice in data if we can; copy if we can't. */
1240 if (tcp_downgrade_zcopy_pure(sk
, skb
))
1241 goto wait_for_space
;
1242 copy
= tcp_wmem_schedule(sk
, copy
);
1244 goto wait_for_space
;
1246 err
= skb_splice_from_iter(skb
, &msg
->msg_iter
, copy
,
1249 if (err
== -EMSGSIZE
) {
1250 tcp_mark_push(tp
, skb
);
1257 if (!(flags
& MSG_NO_SHARED_FRAGS
))
1258 skb_shinfo(skb
)->flags
|= SKBFL_SHARED_FRAG
;
1260 sk_wmem_queued_add(sk
, copy
);
1261 sk_mem_charge(sk
, copy
);
1265 TCP_SKB_CB(skb
)->tcp_flags
&= ~TCPHDR_PSH
;
1267 WRITE_ONCE(tp
->write_seq
, tp
->write_seq
+ copy
);
1268 TCP_SKB_CB(skb
)->end_seq
+= copy
;
1269 tcp_skb_pcount_set(skb
, 0);
1272 if (!msg_data_left(msg
)) {
1273 if (unlikely(flags
& MSG_EOR
))
1274 TCP_SKB_CB(skb
)->eor
= 1;
1278 if (skb
->len
< size_goal
|| (flags
& MSG_OOB
) || unlikely(tp
->repair
))
1281 if (forced_push(tp
)) {
1282 tcp_mark_push(tp
, skb
);
1283 __tcp_push_pending_frames(sk
, mss_now
, TCP_NAGLE_PUSH
);
1284 } else if (skb
== tcp_send_head(sk
))
1285 tcp_push_one(sk
, mss_now
);
1289 set_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
1291 tcp_push(sk
, flags
& ~MSG_MORE
, mss_now
,
1292 TCP_NAGLE_PUSH
, size_goal
);
1294 err
= sk_stream_wait_memory(sk
, &timeo
);
1298 mss_now
= tcp_send_mss(sk
, &size_goal
, flags
);
1303 tcp_tx_timestamp(sk
, sockc
.tsflags
);
1304 tcp_push(sk
, flags
, mss_now
, tp
->nonagle
, size_goal
);
1307 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1308 if (uarg
&& !msg
->msg_ubuf
)
1309 net_zcopy_put(uarg
);
1310 return copied
+ copied_syn
;
1313 tcp_remove_empty_skb(sk
);
1315 if (copied
+ copied_syn
)
1318 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1319 if (uarg
&& !msg
->msg_ubuf
)
1320 net_zcopy_put_abort(uarg
, true);
1321 err
= sk_stream_error(sk
, flags
, err
);
1322 /* make sure we wake any epoll edge trigger waiter */
1323 if (unlikely(tcp_rtx_and_write_queues_empty(sk
) && err
== -EAGAIN
)) {
1324 sk
->sk_write_space(sk
);
1325 tcp_chrono_stop(sk
, TCP_CHRONO_SNDBUF_LIMITED
);
1329 EXPORT_SYMBOL_GPL(tcp_sendmsg_locked
);
1331 int tcp_sendmsg(struct sock
*sk
, struct msghdr
*msg
, size_t size
)
1336 ret
= tcp_sendmsg_locked(sk
, msg
, size
);
1341 EXPORT_SYMBOL(tcp_sendmsg
);
1343 void tcp_splice_eof(struct socket
*sock
)
1345 struct sock
*sk
= sock
->sk
;
1346 struct tcp_sock
*tp
= tcp_sk(sk
);
1347 int mss_now
, size_goal
;
1349 if (!tcp_write_queue_tail(sk
))
1353 mss_now
= tcp_send_mss(sk
, &size_goal
, 0);
1354 tcp_push(sk
, 0, mss_now
, tp
->nonagle
, size_goal
);
1357 EXPORT_SYMBOL_GPL(tcp_splice_eof
);
1360 * Handle reading urgent data. BSD has very simple semantics for
1361 * this, no blocking and very strange errors 8)
1364 static int tcp_recv_urg(struct sock
*sk
, struct msghdr
*msg
, int len
, int flags
)
1366 struct tcp_sock
*tp
= tcp_sk(sk
);
1368 /* No URG data to read. */
1369 if (sock_flag(sk
, SOCK_URGINLINE
) || !tp
->urg_data
||
1370 tp
->urg_data
== TCP_URG_READ
)
1371 return -EINVAL
; /* Yes this is right ! */
1373 if (sk
->sk_state
== TCP_CLOSE
&& !sock_flag(sk
, SOCK_DONE
))
1376 if (tp
->urg_data
& TCP_URG_VALID
) {
1378 char c
= tp
->urg_data
;
1380 if (!(flags
& MSG_PEEK
))
1381 WRITE_ONCE(tp
->urg_data
, TCP_URG_READ
);
1383 /* Read urgent data. */
1384 msg
->msg_flags
|= MSG_OOB
;
1387 if (!(flags
& MSG_TRUNC
))
1388 err
= memcpy_to_msg(msg
, &c
, 1);
1391 msg
->msg_flags
|= MSG_TRUNC
;
1393 return err
? -EFAULT
: len
;
1396 if (sk
->sk_state
== TCP_CLOSE
|| (sk
->sk_shutdown
& RCV_SHUTDOWN
))
1399 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1400 * the available implementations agree in this case:
1401 * this call should never block, independent of the
1402 * blocking state of the socket.
1403 * Mike <pall@rz.uni-karlsruhe.de>
1408 static int tcp_peek_sndq(struct sock
*sk
, struct msghdr
*msg
, int len
)
1410 struct sk_buff
*skb
;
1411 int copied
= 0, err
= 0;
1413 /* XXX -- need to support SO_PEEK_OFF */
1415 skb_rbtree_walk(skb
, &sk
->tcp_rtx_queue
) {
1416 err
= skb_copy_datagram_msg(skb
, 0, msg
, skb
->len
);
1422 skb_queue_walk(&sk
->sk_write_queue
, skb
) {
1423 err
= skb_copy_datagram_msg(skb
, 0, msg
, skb
->len
);
1430 return err
?: copied
;
1433 /* Clean up the receive buffer for full frames taken by the user,
1434 * then send an ACK if necessary. COPIED is the number of bytes
1435 * tcp_recvmsg has given to the user so far, it speeds up the
1436 * calculation of whether or not we must ACK for the sake of
1439 void __tcp_cleanup_rbuf(struct sock
*sk
, int copied
)
1441 struct tcp_sock
*tp
= tcp_sk(sk
);
1442 bool time_to_ack
= false;
1444 if (inet_csk_ack_scheduled(sk
)) {
1445 const struct inet_connection_sock
*icsk
= inet_csk(sk
);
1447 if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
1448 tp
->rcv_nxt
- tp
->rcv_wup
> icsk
->icsk_ack
.rcv_mss
||
1450 * If this read emptied read buffer, we send ACK, if
1451 * connection is not bidirectional, user drained
1452 * receive buffer and there was a small segment
1456 ((icsk
->icsk_ack
.pending
& ICSK_ACK_PUSHED2
) ||
1457 ((icsk
->icsk_ack
.pending
& ICSK_ACK_PUSHED
) &&
1458 !inet_csk_in_pingpong_mode(sk
))) &&
1459 !atomic_read(&sk
->sk_rmem_alloc
)))
1463 /* We send an ACK if we can now advertise a non-zero window
1464 * which has been raised "significantly".
1466 * Even if window raised up to infinity, do not send window open ACK
1467 * in states, where we will not receive more. It is useless.
1469 if (copied
> 0 && !time_to_ack
&& !(sk
->sk_shutdown
& RCV_SHUTDOWN
)) {
1470 __u32 rcv_window_now
= tcp_receive_window(tp
);
1472 /* Optimize, __tcp_select_window() is not cheap. */
1473 if (2*rcv_window_now
<= tp
->window_clamp
) {
1474 __u32 new_window
= __tcp_select_window(sk
);
1476 /* Send ACK now, if this read freed lots of space
1477 * in our buffer. Certainly, new_window is new window.
1478 * We can advertise it now, if it is not less than current one.
1479 * "Lots" means "at least twice" here.
1481 if (new_window
&& new_window
>= 2 * rcv_window_now
)
1489 void tcp_cleanup_rbuf(struct sock
*sk
, int copied
)
1491 struct sk_buff
*skb
= skb_peek(&sk
->sk_receive_queue
);
1492 struct tcp_sock
*tp
= tcp_sk(sk
);
1494 WARN(skb
&& !before(tp
->copied_seq
, TCP_SKB_CB(skb
)->end_seq
),
1495 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1496 tp
->copied_seq
, TCP_SKB_CB(skb
)->end_seq
, tp
->rcv_nxt
);
1497 __tcp_cleanup_rbuf(sk
, copied
);
1500 static void tcp_eat_recv_skb(struct sock
*sk
, struct sk_buff
*skb
)
1502 __skb_unlink(skb
, &sk
->sk_receive_queue
);
1503 if (likely(skb
->destructor
== sock_rfree
)) {
1505 skb
->destructor
= NULL
;
1507 return skb_attempt_defer_free(skb
);
1512 struct sk_buff
*tcp_recv_skb(struct sock
*sk
, u32 seq
, u32
*off
)
1514 struct sk_buff
*skb
;
1517 while ((skb
= skb_peek(&sk
->sk_receive_queue
)) != NULL
) {
1518 offset
= seq
- TCP_SKB_CB(skb
)->seq
;
1519 if (unlikely(TCP_SKB_CB(skb
)->tcp_flags
& TCPHDR_SYN
)) {
1520 pr_err_once("%s: found a SYN, please report !\n", __func__
);
1523 if (offset
< skb
->len
|| (TCP_SKB_CB(skb
)->tcp_flags
& TCPHDR_FIN
)) {
1527 /* This looks weird, but this can happen if TCP collapsing
1528 * splitted a fat GRO packet, while we released socket lock
1529 * in skb_splice_bits()
1531 tcp_eat_recv_skb(sk
, skb
);
1535 EXPORT_SYMBOL(tcp_recv_skb
);
1538 * This routine provides an alternative to tcp_recvmsg() for routines
1539 * that would like to handle copying from skbuffs directly in 'sendfile'
1542 * - It is assumed that the socket was locked by the caller.
1543 * - The routine does not block.
1544 * - At present, there is no support for reading OOB data
1545 * or for 'peeking' the socket using this routine
1546 * (although both would be easy to implement).
1548 int tcp_read_sock(struct sock
*sk
, read_descriptor_t
*desc
,
1549 sk_read_actor_t recv_actor
)
1551 struct sk_buff
*skb
;
1552 struct tcp_sock
*tp
= tcp_sk(sk
);
1553 u32 seq
= tp
->copied_seq
;
1557 if (sk
->sk_state
== TCP_LISTEN
)
1559 while ((skb
= tcp_recv_skb(sk
, seq
, &offset
)) != NULL
) {
1560 if (offset
< skb
->len
) {
1564 len
= skb
->len
- offset
;
1565 /* Stop reading if we hit a patch of urgent data */
1566 if (unlikely(tp
->urg_data
)) {
1567 u32 urg_offset
= tp
->urg_seq
- seq
;
1568 if (urg_offset
< len
)
1573 used
= recv_actor(desc
, skb
, offset
, len
);
1579 if (WARN_ON_ONCE(used
> len
))
1585 /* If recv_actor drops the lock (e.g. TCP splice
1586 * receive) the skb pointer might be invalid when
1587 * getting here: tcp_collapse might have deleted it
1588 * while aggregating skbs from the socket queue.
1590 skb
= tcp_recv_skb(sk
, seq
- 1, &offset
);
1593 /* TCP coalescing might have appended data to the skb.
1594 * Try to splice more frags
1596 if (offset
+ 1 != skb
->len
)
1599 if (TCP_SKB_CB(skb
)->tcp_flags
& TCPHDR_FIN
) {
1600 tcp_eat_recv_skb(sk
, skb
);
1604 tcp_eat_recv_skb(sk
, skb
);
1607 WRITE_ONCE(tp
->copied_seq
, seq
);
1609 WRITE_ONCE(tp
->copied_seq
, seq
);
1611 tcp_rcv_space_adjust(sk
);
1613 /* Clean up data we have read: This will do ACK frames. */
1615 tcp_recv_skb(sk
, seq
, &offset
);
1616 tcp_cleanup_rbuf(sk
, copied
);
1620 EXPORT_SYMBOL(tcp_read_sock
);
1622 int tcp_read_skb(struct sock
*sk
, skb_read_actor_t recv_actor
)
1624 struct tcp_sock
*tp
= tcp_sk(sk
);
1625 u32 seq
= tp
->copied_seq
;
1626 struct sk_buff
*skb
;
1630 if (sk
->sk_state
== TCP_LISTEN
)
1633 while ((skb
= tcp_recv_skb(sk
, seq
, &offset
)) != NULL
) {
1637 __skb_unlink(skb
, &sk
->sk_receive_queue
);
1638 WARN_ON_ONCE(!skb_set_owner_sk_safe(skb
, sk
));
1639 tcp_flags
= TCP_SKB_CB(skb
)->tcp_flags
;
1640 used
= recv_actor(sk
, skb
);
1649 if (tcp_flags
& TCPHDR_FIN
) {
1656 EXPORT_SYMBOL(tcp_read_skb
);
1658 void tcp_read_done(struct sock
*sk
, size_t len
)
1660 struct tcp_sock
*tp
= tcp_sk(sk
);
1661 u32 seq
= tp
->copied_seq
;
1662 struct sk_buff
*skb
;
1666 if (sk
->sk_state
== TCP_LISTEN
)
1670 while (left
&& (skb
= tcp_recv_skb(sk
, seq
, &offset
)) != NULL
) {
1673 used
= min_t(size_t, skb
->len
- offset
, left
);
1677 if (skb
->len
> offset
+ used
)
1680 if (TCP_SKB_CB(skb
)->tcp_flags
& TCPHDR_FIN
) {
1681 tcp_eat_recv_skb(sk
, skb
);
1685 tcp_eat_recv_skb(sk
, skb
);
1687 WRITE_ONCE(tp
->copied_seq
, seq
);
1689 tcp_rcv_space_adjust(sk
);
1691 /* Clean up data we have read: This will do ACK frames. */
1693 tcp_cleanup_rbuf(sk
, len
- left
);
1695 EXPORT_SYMBOL(tcp_read_done
);
1697 int tcp_peek_len(struct socket
*sock
)
1699 return tcp_inq(sock
->sk
);
1701 EXPORT_SYMBOL(tcp_peek_len
);
1703 /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
1704 int tcp_set_rcvlowat(struct sock
*sk
, int val
)
1708 if (sk
->sk_userlocks
& SOCK_RCVBUF_LOCK
)
1709 cap
= sk
->sk_rcvbuf
>> 1;
1711 cap
= READ_ONCE(sock_net(sk
)->ipv4
.sysctl_tcp_rmem
[2]) >> 1;
1712 val
= min(val
, cap
);
1713 WRITE_ONCE(sk
->sk_rcvlowat
, val
? : 1);
1715 /* Check if we need to signal EPOLLIN right now */
1718 if (sk
->sk_userlocks
& SOCK_RCVBUF_LOCK
)
1721 space
= tcp_space_from_win(sk
, val
);
1722 if (space
> sk
->sk_rcvbuf
) {
1723 WRITE_ONCE(sk
->sk_rcvbuf
, space
);
1724 tcp_sk(sk
)->window_clamp
= val
;
1728 EXPORT_SYMBOL(tcp_set_rcvlowat
);
1730 void tcp_update_recv_tstamps(struct sk_buff
*skb
,
1731 struct scm_timestamping_internal
*tss
)
1734 tss
->ts
[0] = ktime_to_timespec64(skb
->tstamp
);
1736 tss
->ts
[0] = (struct timespec64
) {0};
1738 if (skb_hwtstamps(skb
)->hwtstamp
)
1739 tss
->ts
[2] = ktime_to_timespec64(skb_hwtstamps(skb
)->hwtstamp
);
1741 tss
->ts
[2] = (struct timespec64
) {0};
1745 static const struct vm_operations_struct tcp_vm_ops
= {
1748 int tcp_mmap(struct file
*file
, struct socket
*sock
,
1749 struct vm_area_struct
*vma
)
1751 if (vma
->vm_flags
& (VM_WRITE
| VM_EXEC
))
1753 vm_flags_clear(vma
, VM_MAYWRITE
| VM_MAYEXEC
);
1755 /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
1756 vm_flags_set(vma
, VM_MIXEDMAP
);
1758 vma
->vm_ops
= &tcp_vm_ops
;
1761 EXPORT_SYMBOL(tcp_mmap
);
1763 static skb_frag_t
*skb_advance_to_frag(struct sk_buff
*skb
, u32 offset_skb
,
1768 if (unlikely(offset_skb
>= skb
->len
))
1771 offset_skb
-= skb_headlen(skb
);
1772 if ((int)offset_skb
< 0 || skb_has_frag_list(skb
))
1775 frag
= skb_shinfo(skb
)->frags
;
1776 while (offset_skb
) {
1777 if (skb_frag_size(frag
) > offset_skb
) {
1778 *offset_frag
= offset_skb
;
1781 offset_skb
-= skb_frag_size(frag
);
1788 static bool can_map_frag(const skb_frag_t
*frag
)
1790 return skb_frag_size(frag
) == PAGE_SIZE
&& !skb_frag_off(frag
);
1793 static int find_next_mappable_frag(const skb_frag_t
*frag
,
1794 int remaining_in_skb
)
1798 if (likely(can_map_frag(frag
)))
1801 while (offset
< remaining_in_skb
&& !can_map_frag(frag
)) {
1802 offset
+= skb_frag_size(frag
);
1808 static void tcp_zerocopy_set_hint_for_skb(struct sock
*sk
,
1809 struct tcp_zerocopy_receive
*zc
,
1810 struct sk_buff
*skb
, u32 offset
)
1812 u32 frag_offset
, partial_frag_remainder
= 0;
1813 int mappable_offset
;
1816 /* worst case: skip to next skb. try to improve on this case below */
1817 zc
->recv_skip_hint
= skb
->len
- offset
;
1819 /* Find the frag containing this offset (and how far into that frag) */
1820 frag
= skb_advance_to_frag(skb
, offset
, &frag_offset
);
1825 struct skb_shared_info
*info
= skb_shinfo(skb
);
1827 /* We read part of the last frag, must recvmsg() rest of skb. */
1828 if (frag
== &info
->frags
[info
->nr_frags
- 1])
1831 /* Else, we must at least read the remainder in this frag. */
1832 partial_frag_remainder
= skb_frag_size(frag
) - frag_offset
;
1833 zc
->recv_skip_hint
-= partial_frag_remainder
;
1837 /* partial_frag_remainder: If part way through a frag, must read rest.
1838 * mappable_offset: Bytes till next mappable frag, *not* counting bytes
1839 * in partial_frag_remainder.
1841 mappable_offset
= find_next_mappable_frag(frag
, zc
->recv_skip_hint
);
1842 zc
->recv_skip_hint
= mappable_offset
+ partial_frag_remainder
;
1845 static int tcp_recvmsg_locked(struct sock
*sk
, struct msghdr
*msg
, size_t len
,
1846 int flags
, struct scm_timestamping_internal
*tss
,
1848 static int receive_fallback_to_copy(struct sock
*sk
,
1849 struct tcp_zerocopy_receive
*zc
, int inq
,
1850 struct scm_timestamping_internal
*tss
)
1852 unsigned long copy_address
= (unsigned long)zc
->copybuf_address
;
1853 struct msghdr msg
= {};
1858 zc
->recv_skip_hint
= 0;
1860 if (copy_address
!= zc
->copybuf_address
)
1863 err
= import_single_range(ITER_DEST
, (void __user
*)copy_address
,
1864 inq
, &iov
, &msg
.msg_iter
);
1868 err
= tcp_recvmsg_locked(sk
, &msg
, inq
, MSG_DONTWAIT
,
1869 tss
, &zc
->msg_flags
);
1873 zc
->copybuf_len
= err
;
1874 if (likely(zc
->copybuf_len
)) {
1875 struct sk_buff
*skb
;
1878 skb
= tcp_recv_skb(sk
, tcp_sk(sk
)->copied_seq
, &offset
);
1880 tcp_zerocopy_set_hint_for_skb(sk
, zc
, skb
, offset
);
1885 static int tcp_copy_straggler_data(struct tcp_zerocopy_receive
*zc
,
1886 struct sk_buff
*skb
, u32 copylen
,
1887 u32
*offset
, u32
*seq
)
1889 unsigned long copy_address
= (unsigned long)zc
->copybuf_address
;
1890 struct msghdr msg
= {};
1894 if (copy_address
!= zc
->copybuf_address
)
1897 err
= import_single_range(ITER_DEST
, (void __user
*)copy_address
,
1898 copylen
, &iov
, &msg
.msg_iter
);
1901 err
= skb_copy_datagram_msg(skb
, *offset
, &msg
, copylen
);
1904 zc
->recv_skip_hint
-= copylen
;
1907 return (__s32
)copylen
;
1910 static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive
*zc
,
1912 struct sk_buff
*skb
,
1915 struct scm_timestamping_internal
*tss
)
1917 u32 offset
, copylen
= min_t(u32
, copybuf_len
, zc
->recv_skip_hint
);
1921 /* skb is null if inq < PAGE_SIZE. */
1923 offset
= *seq
- TCP_SKB_CB(skb
)->seq
;
1925 skb
= tcp_recv_skb(sk
, *seq
, &offset
);
1926 if (TCP_SKB_CB(skb
)->has_rxtstamp
) {
1927 tcp_update_recv_tstamps(skb
, tss
);
1928 zc
->msg_flags
|= TCP_CMSG_TS
;
1932 zc
->copybuf_len
= tcp_copy_straggler_data(zc
, skb
, copylen
, &offset
,
1934 return zc
->copybuf_len
< 0 ? 0 : copylen
;
1937 static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct
*vma
,
1938 struct page
**pending_pages
,
1939 unsigned long pages_remaining
,
1940 unsigned long *address
,
1943 struct tcp_zerocopy_receive
*zc
,
1944 u32 total_bytes_to_map
,
1947 /* At least one page did not map. Try zapping if we skipped earlier. */
1948 if (err
== -EBUSY
&&
1949 zc
->flags
& TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT
) {
1952 maybe_zap_len
= total_bytes_to_map
- /* All bytes to map */
1953 *length
+ /* Mapped or pending */
1954 (pages_remaining
* PAGE_SIZE
); /* Failed map. */
1955 zap_page_range_single(vma
, *address
, maybe_zap_len
, NULL
);
1960 unsigned long leftover_pages
= pages_remaining
;
1963 /* We called zap_page_range_single, try to reinsert. */
1964 err
= vm_insert_pages(vma
, *address
,
1967 bytes_mapped
= PAGE_SIZE
* (leftover_pages
- pages_remaining
);
1968 *seq
+= bytes_mapped
;
1969 *address
+= bytes_mapped
;
1972 /* Either we were unable to zap, OR we zapped, retried an
1973 * insert, and still had an issue. Either ways, pages_remaining
1974 * is the number of pages we were unable to map, and we unroll
1975 * some state we speculatively touched before.
1977 const int bytes_not_mapped
= PAGE_SIZE
* pages_remaining
;
1979 *length
-= bytes_not_mapped
;
1980 zc
->recv_skip_hint
+= bytes_not_mapped
;
1985 static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct
*vma
,
1986 struct page
**pages
,
1987 unsigned int pages_to_map
,
1988 unsigned long *address
,
1991 struct tcp_zerocopy_receive
*zc
,
1992 u32 total_bytes_to_map
)
1994 unsigned long pages_remaining
= pages_to_map
;
1995 unsigned int pages_mapped
;
1996 unsigned int bytes_mapped
;
1999 err
= vm_insert_pages(vma
, *address
, pages
, &pages_remaining
);
2000 pages_mapped
= pages_to_map
- (unsigned int)pages_remaining
;
2001 bytes_mapped
= PAGE_SIZE
* pages_mapped
;
2002 /* Even if vm_insert_pages fails, it may have partially succeeded in
2003 * mapping (some but not all of the pages).
2005 *seq
+= bytes_mapped
;
2006 *address
+= bytes_mapped
;
2011 /* Error: maybe zap and retry + rollback state for failed inserts. */
2012 return tcp_zerocopy_vm_insert_batch_error(vma
, pages
+ pages_mapped
,
2013 pages_remaining
, address
, length
, seq
, zc
, total_bytes_to_map
,
2017 #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS)
2018 static void tcp_zc_finalize_rx_tstamp(struct sock
*sk
,
2019 struct tcp_zerocopy_receive
*zc
,
2020 struct scm_timestamping_internal
*tss
)
2022 unsigned long msg_control_addr
;
2023 struct msghdr cmsg_dummy
;
2025 msg_control_addr
= (unsigned long)zc
->msg_control
;
2026 cmsg_dummy
.msg_control_user
= (void __user
*)msg_control_addr
;
2027 cmsg_dummy
.msg_controllen
=
2028 (__kernel_size_t
)zc
->msg_controllen
;
2029 cmsg_dummy
.msg_flags
= in_compat_syscall()
2030 ? MSG_CMSG_COMPAT
: 0;
2031 cmsg_dummy
.msg_control_is_user
= true;
2033 if (zc
->msg_control
== msg_control_addr
&&
2034 zc
->msg_controllen
== cmsg_dummy
.msg_controllen
) {
2035 tcp_recv_timestamp(&cmsg_dummy
, sk
, tss
);
2036 zc
->msg_control
= (__u64
)
2037 ((uintptr_t)cmsg_dummy
.msg_control_user
);
2038 zc
->msg_controllen
=
2039 (__u64
)cmsg_dummy
.msg_controllen
;
2040 zc
->msg_flags
= (__u32
)cmsg_dummy
.msg_flags
;
2044 static struct vm_area_struct
*find_tcp_vma(struct mm_struct
*mm
,
2045 unsigned long address
,
2048 struct vm_area_struct
*vma
= lock_vma_under_rcu(mm
, address
);
2051 if (vma
->vm_ops
!= &tcp_vm_ops
) {
2055 *mmap_locked
= false;
2060 vma
= vma_lookup(mm
, address
);
2061 if (!vma
|| vma
->vm_ops
!= &tcp_vm_ops
) {
2062 mmap_read_unlock(mm
);
2065 *mmap_locked
= true;
2069 #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32
2070 static int tcp_zerocopy_receive(struct sock
*sk
,
2071 struct tcp_zerocopy_receive
*zc
,
2072 struct scm_timestamping_internal
*tss
)
2074 u32 length
= 0, offset
, vma_len
, avail_len
, copylen
= 0;
2075 unsigned long address
= (unsigned long)zc
->address
;
2076 struct page
*pages
[TCP_ZEROCOPY_PAGE_BATCH_SIZE
];
2077 s32 copybuf_len
= zc
->copybuf_len
;
2078 struct tcp_sock
*tp
= tcp_sk(sk
);
2079 const skb_frag_t
*frags
= NULL
;
2080 unsigned int pages_to_map
= 0;
2081 struct vm_area_struct
*vma
;
2082 struct sk_buff
*skb
= NULL
;
2083 u32 seq
= tp
->copied_seq
;
2084 u32 total_bytes_to_map
;
2085 int inq
= tcp_inq(sk
);
2089 zc
->copybuf_len
= 0;
2092 if (address
& (PAGE_SIZE
- 1) || address
!= zc
->address
)
2095 if (sk
->sk_state
== TCP_LISTEN
)
2098 sock_rps_record_flow(sk
);
2100 if (inq
&& inq
<= copybuf_len
)
2101 return receive_fallback_to_copy(sk
, zc
, inq
, tss
);
2103 if (inq
< PAGE_SIZE
) {
2105 zc
->recv_skip_hint
= inq
;
2106 if (!inq
&& sock_flag(sk
, SOCK_DONE
))
2111 vma
= find_tcp_vma(current
->mm
, address
, &mmap_locked
);
2115 vma_len
= min_t(unsigned long, zc
->length
, vma
->vm_end
- address
);
2116 avail_len
= min_t(u32
, vma_len
, inq
);
2117 total_bytes_to_map
= avail_len
& ~(PAGE_SIZE
- 1);
2118 if (total_bytes_to_map
) {
2119 if (!(zc
->flags
& TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT
))
2120 zap_page_range_single(vma
, address
, total_bytes_to_map
,
2122 zc
->length
= total_bytes_to_map
;
2123 zc
->recv_skip_hint
= 0;
2125 zc
->length
= avail_len
;
2126 zc
->recv_skip_hint
= avail_len
;
2129 while (length
+ PAGE_SIZE
<= zc
->length
) {
2130 int mappable_offset
;
2133 if (zc
->recv_skip_hint
< PAGE_SIZE
) {
2137 if (zc
->recv_skip_hint
> 0)
2140 offset
= seq
- TCP_SKB_CB(skb
)->seq
;
2142 skb
= tcp_recv_skb(sk
, seq
, &offset
);
2145 if (TCP_SKB_CB(skb
)->has_rxtstamp
) {
2146 tcp_update_recv_tstamps(skb
, tss
);
2147 zc
->msg_flags
|= TCP_CMSG_TS
;
2149 zc
->recv_skip_hint
= skb
->len
- offset
;
2150 frags
= skb_advance_to_frag(skb
, offset
, &offset_frag
);
2151 if (!frags
|| offset_frag
)
2155 mappable_offset
= find_next_mappable_frag(frags
,
2156 zc
->recv_skip_hint
);
2157 if (mappable_offset
) {
2158 zc
->recv_skip_hint
= mappable_offset
;
2161 page
= skb_frag_page(frags
);
2163 pages
[pages_to_map
++] = page
;
2164 length
+= PAGE_SIZE
;
2165 zc
->recv_skip_hint
-= PAGE_SIZE
;
2167 if (pages_to_map
== TCP_ZEROCOPY_PAGE_BATCH_SIZE
||
2168 zc
->recv_skip_hint
< PAGE_SIZE
) {
2169 /* Either full batch, or we're about to go to next skb
2170 * (and we cannot unroll failed ops across skbs).
2172 ret
= tcp_zerocopy_vm_insert_batch(vma
, pages
,
2176 total_bytes_to_map
);
2183 ret
= tcp_zerocopy_vm_insert_batch(vma
, pages
, pages_to_map
,
2184 &address
, &length
, &seq
,
2185 zc
, total_bytes_to_map
);
2189 mmap_read_unlock(current
->mm
);
2192 /* Try to copy straggler data. */
2194 copylen
= tcp_zc_handle_leftover(zc
, sk
, skb
, &seq
, copybuf_len
, tss
);
2196 if (length
+ copylen
) {
2197 WRITE_ONCE(tp
->copied_seq
, seq
);
2198 tcp_rcv_space_adjust(sk
);
2200 /* Clean up data we have read: This will do ACK frames. */
2201 tcp_recv_skb(sk
, seq
, &offset
);
2202 tcp_cleanup_rbuf(sk
, length
+ copylen
);
2204 if (length
== zc
->length
)
2205 zc
->recv_skip_hint
= 0;
2207 if (!zc
->recv_skip_hint
&& sock_flag(sk
, SOCK_DONE
))
2210 zc
->length
= length
;
/* Similar to __sock_recv_timestamp, but does not require an skb */
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss)
	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
	bool has_timestamping = false;

	if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
		if (sock_flag(sk, SOCK_RCVTSTAMP)) {
			if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
				struct __kernel_timespec kts = {
					.tv_sec = tss->ts[0].tv_sec,
					.tv_nsec = tss->ts[0].tv_nsec,
				};
				put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,

				struct __kernel_old_timespec ts_old = {
					.tv_sec = tss->ts[0].tv_sec,
					.tv_nsec = tss->ts[0].tv_nsec,
				};
				put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
					 sizeof(ts_old), &ts_old);

				struct __kernel_sock_timeval stv = {
					.tv_sec = tss->ts[0].tv_sec,
					.tv_usec = tss->ts[0].tv_nsec / 1000,
				};
				put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,

				struct __kernel_old_timeval tv = {
					.tv_sec = tss->ts[0].tv_sec,
					.tv_usec = tss->ts[0].tv_nsec / 1000,
				};
				put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,

		if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
			has_timestamping = true;

			tss->ts[0] = (struct timespec64) {0};

	if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
		if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
			has_timestamping = true;

			tss->ts[2] = (struct timespec64) {0};

	if (has_timestamping) {
		tss->ts[1] = (struct timespec64) {0};
		if (sock_flag(sk, SOCK_TSTAMP_NEW))
			put_cmsg_scm_timestamping64(msg, tss);

			put_cmsg_scm_timestamping(msg, tss);
static int tcp_inq_hint(struct sock *sk)
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 copied_seq = READ_ONCE(tp->copied_seq);
	u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);

	inq = rcv_nxt - copied_seq;
	if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
		inq = tp->rcv_nxt - tp->copied_seq;

	/* After receiving a FIN, tell the user-space to continue reading
	 * by returning a non-zero inq.
	 */
	if (inq == 0 && sock_flag(sk, SOCK_DONE))
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */
static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
			      int flags, struct scm_timestamping_internal *tss,
	struct tcp_sock *tp = tcp_sk(sk);
	int target;		/* Read at least this many bytes */
	struct sk_buff *skb, *last;

	if (sk->sk_state == TCP_LISTEN)

	if (tp->recvmsg_inq) {
		*cmsg_flags = TCP_CMSG_INQ;
		msg->msg_get_inq = 1;
	}
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)

	if (unlikely(tp->repair)) {
		if (!(flags & MSG_PEEK))

		if (tp->repair_queue == TCP_SEND_QUEUE)

		if (tp->repair_queue == TCP_NO_QUEUE)

		/* 'common' recv queue MSG_PEEK-ing */
	}

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (unlikely(tp->urg_data) && tp->urg_seq == *seq) {
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;

		/* Next get a buffer. */
		last = skb_peek_tail(&sk->sk_receive_queue);
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			/* Now that we have two receive queues this
			 */
			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
				 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,

			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
				pr_err_once("%s: found a SYN, please report !\n", __func__);

			if (offset < skb->len)

			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)

			WARN(!(flags & MSG_PEEK),
			     "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
		}

		/* Well, if we have backlog, try to process it now yet. */

		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))

			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))

			if (sock_flag(sk, SOCK_DONE))

				copied = sock_error(sk);

			if (sk->sk_shutdown & RCV_SHUTDOWN)

			if (sk->sk_state == TCP_CLOSE) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			__sk_flush_backlog(sk);

			tcp_cleanup_rbuf(sk, copied);
			sk_wait_data(sk, &timeo, last);
		}

		if ((flags & MSG_PEEK) &&
		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
					    task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}

		/* Ok so how much can we use? */
		used = skb->len - offset;

		/* Do we have urgent data here? */
		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - *seq;

			if (urg_offset < used) {
				if (!sock_flag(sk, SOCK_URGINLINE)) {
					WRITE_ONCE(*seq, *seq + 1);

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_msg(skb, offset, msg, used);
				/* Exception. Bailout! */

		WRITE_ONCE(*seq, *seq + used);

		tcp_rcv_space_adjust(sk);

		if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {
			WRITE_ONCE(tp->urg_data, 0);
			tcp_fast_path_check(sk);
		}

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			tcp_update_recv_tstamps(skb, tss);
			*cmsg_flags |= TCP_CMSG_TS;
		}

		if (used + offset < skb->len)

		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)

		if (!(flags & MSG_PEEK))
			tcp_eat_recv_skb(sk, skb);

	/* Process the FIN. */
	WRITE_ONCE(*seq, *seq + 1);
	if (!(flags & MSG_PEEK))
		tcp_eat_recv_skb(sk, skb);

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	err = tcp_recv_urg(sk, msg, len, flags);

	err = tcp_peek_sndq(sk, msg, len);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
	int cmsg_flags = 0, ret;
	struct scm_timestamping_internal tss;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (sk_can_busy_loop(sk) &&
	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
	    sk->sk_state == TCP_ESTABLISHED)
		sk_busy_loop(sk, flags & MSG_DONTWAIT);

	ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);

	if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
		if (cmsg_flags & TCP_CMSG_TS)
			tcp_recv_timestamp(msg, sk, &tss);
		if (msg->msg_get_inq) {
			msg->msg_inq = tcp_inq_hint(sk);
			if (cmsg_flags & TCP_CMSG_INQ)
				put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
					 sizeof(msg->msg_inq), &msg->msg_inq);
		}
	}
EXPORT_SYMBOL(tcp_recvmsg);
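/* Illustrative sketch, not part of the kernel build: when the TCP_INQ
 * option has set tp->recvmsg_inq, each recvmsg() carries the remaining
 * in-queue byte count as the TCP_CM_INQ control message filled in above.
 * A minimal user-space consumer, assuming <linux/tcp.h> definitions:
 *
 *	int one = 1, inq = 0;
 *	setsockopt(fd, SOL_TCP, TCP_INQ, &one, sizeof(one));
 *	...
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_TCP && cm->cmsg_type == TCP_CM_INQ)
 *			inq = *(int *)CMSG_DATA(cm);
 */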
void tcp_set_state(struct sock *sk, int state)
	int oldstate = sk->sk_state;

	/* We defined a new enum for TCP states that are exported in BPF
	 * so as not to force the internal TCP states to be frozen. The
	 * following checks will detect if an internal state value ever
	 * differs from the BPF value. If this ever happens, then we will
	 * need to remap the internal value to the BPF value before calling
	 * tcp_call_bpf_2arg.
	 */
	BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
	BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
	BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
	BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
	BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
	BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
	BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
	BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
	BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
	BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
	BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);

	/* bpf uapi header bpf.h defines an anonymous enum with values
	 * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux
	 * is able to emit this enum in DWARF due to the above BUILD_BUG_ON.
	 * But clang built vmlinux does not have this enum in DWARF
	 * since clang removes the above code before generating IR/debuginfo.
	 * Let us explicitly emit the type debuginfo to ensure the
	 * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF
	 * regardless of which compiler is used.
	 */
	BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED);

	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);

	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);

		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))

		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	inet_sk_state_store(sk, state);
EXPORT_SYMBOL_GPL(tcp_set_state);
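/* Note: the BPF_SOCK_OPS_STATE_CB hook above means a sockops BPF program
 * that has set BPF_SOCK_OPS_STATE_CB_FLAG observes every
 * (oldstate, state) transition made through tcp_set_state().
 */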
/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 */
static const unsigned char new_state[16] = {
	/* current state:        new state:      action:	*/
	[0 /* (Invalid) */]	= TCP_CLOSE,
	[TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_SYN_SENT]		= TCP_CLOSE,
	[TCP_SYN_RECV]		= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_FIN_WAIT1]		= TCP_FIN_WAIT1,
	[TCP_FIN_WAIT2]		= TCP_FIN_WAIT2,
	[TCP_TIME_WAIT]		= TCP_CLOSE,
	[TCP_CLOSE]		= TCP_CLOSE,
	[TCP_CLOSE_WAIT]	= TCP_LAST_ACK | TCP_ACTION_FIN,
	[TCP_LAST_ACK]		= TCP_LAST_ACK,
	[TCP_LISTEN]		= TCP_CLOSE,
	[TCP_CLOSING]		= TCP_CLOSING,
	[TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
};
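/* Example reading of the table: a close() in TCP_ESTABLISHED maps to
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN, i.e. move to FIN-WAIT-1 and tell the
 * caller (tcp_close_state()) that a FIN must be sent, whereas a close()
 * in TCP_SYN_SENT maps to plain TCP_CLOSE with no FIN.
 */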
static int tcp_close_state(struct sock *sk)
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
 */
void tcp_shutdown(struct sock *sk, int how)
	/* We need to grab some memory, and put together a FIN,
	 * and then put it into the queue to be sent.
	 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
EXPORT_SYMBOL(tcp_shutdown);
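/* Illustrative sketch, not part of the kernel build: this is the path
 * behind a user-space half-close,
 *
 *	shutdown(fd, SHUT_WR);		-> tcp_shutdown(sk, SEND_SHUTDOWN)
 *
 * which queues a FIN via tcp_close_state() but, unlike close(), leaves
 * the receive side of the connection open.
 */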
int tcp_orphan_count_sum(void)
	for_each_possible_cpu(i)
		total += per_cpu(tcp_orphan_count, i);

	return max(total, 0);

static int tcp_orphan_cache;
static struct timer_list tcp_orphan_timer;
#define TCP_ORPHAN_TIMER_PERIOD	msecs_to_jiffies(100)

static void tcp_orphan_update(struct timer_list *unused)
	WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);

static bool tcp_too_many_orphans(int shift)
	return READ_ONCE(tcp_orphan_cache) << shift >
		READ_ONCE(sysctl_tcp_max_orphans);

bool tcp_check_oom(struct sock *sk, int shift)
	bool too_many_orphans, out_of_socket_memory;

	too_many_orphans = tcp_too_many_orphans(shift);
	out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans)
		net_info_ratelimited("too many orphaned sockets\n");
	if (out_of_socket_memory)
		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
	return too_many_orphans || out_of_socket_memory;
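/* Note: tcp_orphan_count is kept as per-CPU counters, so tcp_check_oom()
 * works on tcp_orphan_cache, an approximate total that the deferrable
 * tcp_orphan_timer refreshes roughly every TCP_ORPHAN_TIMER_PERIOD
 * (100 ms), rather than summing all CPUs on every call.
 */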
void __tcp_close(struct sock *sk, long timeout)
	struct sk_buff *skb;
	int data_was_unread = 0;

	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/* We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;

		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)

		data_was_unread += len;
	}

	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client, wheee...
	 * Note: timeout is always zero in such a case.
	 */
	if (unlikely(tcp_sk(sk)->repair)) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 *
		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
		 * in a single packet! (May consider it later but will
		 * probably need API support or TCP_CORK SYN-ACK until
		 * data is written and socket is closed.)
		 */
	}

	sk_stream_wait_close(sk, timeout);

	state = sk->sk_state;

	/* remove backlog if any, without releasing ownership. */

	this_cpu_inc(tcp_orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)

	/*	This is a (useful) BSD violating of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 1 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *
	 *	Nope, it was not mistake. It is really desired behaviour
	 *	f.e. on http servers, when such sockets are useless, but
	 *	consume significant resources. Let's do it with special
	 *	linger2	option.					--ANK
	 */
	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (READ_ONCE(tp->linger2) < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONLINGER);

			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);

				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);

	if (sk->sk_state != TCP_CLOSE) {
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONMEMORY);
		} else if (!check_net(sock_net(sk))) {
			/* Not possible to send reset; just close */
			tcp_set_state(sk, TCP_CLOSE);

	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req;

		req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
						lockdep_sock_is_held(sk));
		/* We could get here with a non-NULL req if the socket is
		 * aborted (e.g., closed with unread data) before 3WHS
		 */
		reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

void tcp_close(struct sock *sk, long timeout)
	__tcp_close(sk, timeout);
EXPORT_SYMBOL(tcp_close);
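/* Illustrative sketch, not part of the kernel build: as implemented above,
 * close() aborts the connection (RST path) instead of the normal FIN
 * handshake when unread data is still queued, or when the application
 * armed a zero-length linger, e.g.:
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);	// immediate abort, no FIN/TIME-WAIT sequence
 */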
/* These states need RST on ABORT according to RFC793 */
static inline bool tcp_need_reset(int state)
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
static void tcp_rtx_queue_purge(struct sock *sk)
	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);

	tcp_sk(sk)->highest_sack = NULL;

		struct sk_buff *skb = rb_to_skb(p);

		/* Since we are deleting whole queue, no need to
		 * list_del(&skb->tcp_tsorted_anchor)
		 */
		tcp_rtx_queue_unlink(skb, sk);
		tcp_wmem_free_skb(sk, skb);

void tcp_write_queue_purge(struct sock *sk)
	struct sk_buff *skb;

	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		tcp_skb_tsorted_anchor_cleanup(skb);
		tcp_wmem_free_skb(sk, skb);
	}
	tcp_rtx_queue_purge(sk);
	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
	tcp_sk(sk)->packets_out = 0;
	inet_csk(sk)->icsk_backoff = 0;
int tcp_disconnect(struct sock *sk, int flags)
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int old_state = sk->sk_state;

	/* Deny disconnect if other threads are blocked in sk_wait_event()
	 * or inet_wait_for_connect().
	 */
	if (sk->sk_wait_pending)

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (unlikely(tp->repair)) {
		WRITE_ONCE(sk->sk_err, ECONNABORTED);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 */
		tcp_send_active_reset(sk, gfp_any());
		WRITE_ONCE(sk->sk_err, ECONNRESET);
	} else if (old_state == TCP_SYN_SENT)
		WRITE_ONCE(sk->sk_err, ECONNRESET);

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
	WRITE_ONCE(tp->urg_data, 0);
	tcp_write_queue_purge(sk);
	tcp_fastopen_active_disable_ofo_check(sk);
	skb_rbtree_purge(&tp->out_of_order_queue);

	inet->inet_dport = 0;

	inet_bhash2_reset_saddr(sk);

	WRITE_ONCE(sk->sk_shutdown, 0);
	sock_reset_flag(sk, SOCK_DONE);

	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	tp->rcv_rtt_last_tsecr = 0;

	seq = tp->write_seq + tp->max_window + 2;

	WRITE_ONCE(tp->write_seq, seq);

	icsk->icsk_backoff = 0;
	icsk->icsk_probes_out = 0;
	icsk->icsk_probes_tstamp = 0;
	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
	tp->snd_cwnd_cnt = 0;
	tp->is_cwnd_limited = 0;
	tp->max_packets_out = 0;
	tp->window_clamp = 0;

	tp->delivered_ce = 0;
	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	icsk->icsk_ca_initialized = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tp->is_sack_reneg = 0;
	tcp_clear_retrans(tp);
	tp->total_retrans = 0;
	inet_csk_delack_init(sk);
	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
	 * issue in __tcp_select_window()
	 */
	icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));

	dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
	tcp_saved_syn_free(tp);
	tp->compressed_ack = 0;

	tp->bytes_acked = 0;
	tp->bytes_received = 0;
	tp->bytes_retrans = 0;
	tp->data_segs_in = 0;
	tp->data_segs_out = 0;
	tp->duplicate_sack[0].start_seq = 0;
	tp->duplicate_sack[0].end_seq = 0;

	tp->retrans_out = 0;

	tp->tlp_high_seq = 0;
	tp->last_oow_ack_time = 0;

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;
	tp->rack.mstamp = 0;
	tp->rack.advanced = 0;
	tp->rack.reo_wnd_steps = 1;
	tp->rack.last_delivered = 0;
	tp->rack.reo_wnd_persist = 0;
	tp->rack.dsack_seen = 0;
	tp->syn_data_acked = 0;
	tp->rx_opt.saw_tstamp = 0;
	tp->rx_opt.dsack = 0;
	tp->rx_opt.num_sacks = 0;
	tp->rcv_ooopack = 0;

	/* Clean up fastopen related fields */
	tcp_free_fastopen_req(tp);
	inet_clear_bit(DEFER_CONNECT, sk);
	tp->fastopen_client_fail = 0;

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
		sk->sk_frag.offset = 0;
	}

	sk_error_report(sk);
EXPORT_SYMBOL(tcp_disconnect);
static inline bool tcp_can_repair_sock(const struct sock *sk)
	return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
		(sk->sk_state != TCP_LISTEN);
static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
	struct tcp_repair_window opt;

	if (len != sizeof(opt))

	if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))

	if (opt.max_window < opt.snd_wnd)

	if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))

	if (after(opt.rcv_wup, tp->rcv_nxt))

	tp->snd_wl1	= opt.snd_wl1;
	tp->snd_wnd	= opt.snd_wnd;
	tp->max_window	= opt.max_window;

	tp->rcv_wnd	= opt.rcv_wnd;
	tp->rcv_wup	= opt.rcv_wup;
static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_repair_opt opt;

	while (len >= sizeof(opt)) {
		if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt)))

		offset += sizeof(opt);

		switch (opt.opt_code) {
			tp->rx_opt.mss_clamp = opt.opt_val;

			u16 snd_wscale = opt.opt_val & 0xFFFF;
			u16 rcv_wscale = opt.opt_val >> 16;

			if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)

			tp->rx_opt.snd_wscale = snd_wscale;
			tp->rx_opt.rcv_wscale = rcv_wscale;
			tp->rx_opt.wscale_ok = 1;

		case TCPOPT_SACK_PERM:
			if (opt.opt_val != 0)

			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;

		case TCPOPT_TIMESTAMP:
			if (opt.opt_val != 0)

			tp->rx_opt.tstamp_ok = 1;
DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
EXPORT_SYMBOL(tcp_tx_delay_enabled);

static void tcp_enable_tx_delay(void)
	if (!static_branch_unlikely(&tcp_tx_delay_enabled)) {
		static int __tcp_tx_delay_enabled = 0;

		if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) {
			static_branch_enable(&tcp_tx_delay_enabled);
			pr_info("TCP_TX_DELAY enabled\n");
		}
	}
/* When set indicates to always queue non-full frames.  Later the user clears
 * this option and we transmit any pending partial frames in the queue.  This is
 * meant to be used alongside sendfile() to get properly filled frames when the
 * user (for example) must write out headers with a write() call first and then
 * use sendfile to send out the data parts.
 *
 * TCP_CORK can be set together with TCP_NODELAY and it is stronger than
 * TCP_NODELAY.
 */
void __tcp_sock_set_cork(struct sock *sk, bool on)
	struct tcp_sock *tp = tcp_sk(sk);

		tp->nonagle |= TCP_NAGLE_CORK;

		tp->nonagle &= ~TCP_NAGLE_CORK;
		if (tp->nonagle & TCP_NAGLE_OFF)
			tp->nonagle |= TCP_NAGLE_PUSH;
		tcp_push_pending_frames(sk);

void tcp_sock_set_cork(struct sock *sk, bool on)
	__tcp_sock_set_cork(sk, on);
EXPORT_SYMBOL(tcp_sock_set_cork);
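/* Illustrative sketch, not part of the kernel build: the user-space pattern
 * described above, assuming a header buffer plus a file payload:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);		// queued, not pushed
 *	sendfile(fd, file_fd, NULL, file_len);	// fills full-sized frames
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off)); // push
 */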
/* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is
 * remembered, but it is not activated until cork is cleared.
 *
 * However, when TCP_NODELAY is set we make an explicit push, which overrides
 * even TCP_CORK for currently queued segments.
 */
void __tcp_sock_set_nodelay(struct sock *sk, bool on)
		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
		tcp_push_pending_frames(sk);

		tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF;

void tcp_sock_set_nodelay(struct sock *sk)
	__tcp_sock_set_nodelay(sk, true);
EXPORT_SYMBOL(tcp_sock_set_nodelay);
static void __tcp_sock_set_quickack(struct sock *sk, int val)
		inet_csk_enter_pingpong_mode(sk);

	inet_csk_exit_pingpong_mode(sk);
	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
	    inet_csk_ack_scheduled(sk)) {
		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED;
		tcp_cleanup_rbuf(sk, 1);

			inet_csk_enter_pingpong_mode(sk);
	}

void tcp_sock_set_quickack(struct sock *sk, int val)
	__tcp_sock_set_quickack(sk, val);
EXPORT_SYMBOL(tcp_sock_set_quickack);
int tcp_sock_set_syncnt(struct sock *sk, int val)
	if (val < 1 || val > MAX_TCP_SYNCNT)

	WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val);
EXPORT_SYMBOL(tcp_sock_set_syncnt);

int tcp_sock_set_user_timeout(struct sock *sk, int val)
	/* Cap the max time in ms TCP will retry or probe the window
	 * before giving up and aborting (ETIMEDOUT) a connection.
	 */
	WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val);
EXPORT_SYMBOL(tcp_sock_set_user_timeout);
int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
	struct tcp_sock *tp = tcp_sk(sk);

	if (val < 1 || val > MAX_TCP_KEEPIDLE)

	/* Paired with WRITE_ONCE() in keepalive_time_when() */
	WRITE_ONCE(tp->keepalive_time, val * HZ);
	if (sock_flag(sk, SOCK_KEEPOPEN) &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		u32 elapsed = keepalive_time_elapsed(tp);

		if (tp->keepalive_time > elapsed)
			elapsed = tp->keepalive_time - elapsed;

		inet_csk_reset_keepalive_timer(sk, elapsed);
	}

int tcp_sock_set_keepidle(struct sock *sk, int val)
	err = tcp_sock_set_keepidle_locked(sk, val);
EXPORT_SYMBOL(tcp_sock_set_keepidle);

int tcp_sock_set_keepintvl(struct sock *sk, int val)
	if (val < 1 || val > MAX_TCP_KEEPINTVL)

	WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
EXPORT_SYMBOL(tcp_sock_set_keepintvl);

int tcp_sock_set_keepcnt(struct sock *sk, int val)
	if (val < 1 || val > MAX_TCP_KEEPCNT)

	/* Paired with READ_ONCE() in keepalive_probes() */
	WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
EXPORT_SYMBOL(tcp_sock_set_keepcnt);
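/* Illustrative sketch, not part of the kernel build: these helpers mirror
 * the TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT socket options; probes are
 * only sent once SO_KEEPALIVE is enabled on the socket, e.g.:
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */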
int tcp_set_window_clamp(struct sock *sk, int val)
	struct tcp_sock *tp = tcp_sk(sk);

		if (sk->sk_state != TCP_CLOSE)

		tp->window_clamp = 0;

		tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
				   SOCK_MIN_RCVBUF / 2 : val;
		tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
/*
 *	Socket option code for TCP.
 */
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen)
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct net *net = sock_net(sk);

	/* These are data/string values, all the others are ints */
	case TCP_CONGESTION: {
		char name[TCP_CA_NAME_MAX];

		val = strncpy_from_sockptr(name, optval,
					   min_t(long, TCP_CA_NAME_MAX-1, optlen));

		sockopt_lock_sock(sk);
		err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(),
						 sockopt_ns_capable(sock_net(sk)->user_ns,
		sockopt_release_sock(sk);

		char name[TCP_ULP_NAME_MAX];

		val = strncpy_from_sockptr(name, optval,
					   min_t(long, TCP_ULP_NAME_MAX - 1,

		sockopt_lock_sock(sk);
		err = tcp_set_ulp(sk, name);
		sockopt_release_sock(sk);

	case TCP_FASTOPEN_KEY: {
		__u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
		__u8 *backup_key = NULL;

		/* Allow a backup key as well to facilitate key rotation
		 * First key is the active one.
		 */
		if (optlen != TCP_FASTOPEN_KEY_LENGTH &&
		    optlen != TCP_FASTOPEN_KEY_BUF_LENGTH)

		if (copy_from_sockptr(key, optval, optlen))

		if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH)
			backup_key = key + TCP_FASTOPEN_KEY_LENGTH;

		return tcp_fastopen_reset_cipher(net, sk, key, backup_key);

	if (optlen < sizeof(int))

	if (copy_from_sockptr(&val, optval, sizeof(val)))

	/* Handle options that can be set without locking the socket. */
		return tcp_sock_set_syncnt(sk, val);
	case TCP_USER_TIMEOUT:
		return tcp_sock_set_user_timeout(sk, val);

		return tcp_sock_set_keepintvl(sk, val);

		return tcp_sock_set_keepcnt(sk, val);

			WRITE_ONCE(tp->linger2, -1);
		else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
			WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);

			WRITE_ONCE(tp->linger2, val * HZ);

	case TCP_DEFER_ACCEPT:
		/* Translate value in seconds to number of retransmits */
		WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
			   secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,

	sockopt_lock_sock(sk);

		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used
		 */
		if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {

		tp->rx_opt.user_mss = val;

		__tcp_sock_set_nodelay(sk, val);

	case TCP_THIN_LINEAR_TIMEOUTS:
		if (val < 0 || val > 1)

	case TCP_THIN_DUPACK:
		if (val < 0 || val > 1)

		if (!tcp_can_repair_sock(sk))

		else if (val == TCP_REPAIR_ON) {
			sk->sk_reuse = SK_FORCE_REUSE;
			tp->repair_queue = TCP_NO_QUEUE;
		} else if (val == TCP_REPAIR_OFF) {
			sk->sk_reuse = SK_NO_REUSE;
			tcp_send_window_probe(sk);
		} else if (val == TCP_REPAIR_OFF_NO_WP) {
			sk->sk_reuse = SK_NO_REUSE;

	case TCP_REPAIR_QUEUE:
		else if ((unsigned int)val < TCP_QUEUES_NR)
			tp->repair_queue = val;

		if (sk->sk_state != TCP_CLOSE) {
		} else if (tp->repair_queue == TCP_SEND_QUEUE) {
			if (!tcp_rtx_queue_empty(sk))

			WRITE_ONCE(tp->write_seq, val);
		} else if (tp->repair_queue == TCP_RECV_QUEUE) {
			if (tp->rcv_nxt != tp->copied_seq) {

				WRITE_ONCE(tp->rcv_nxt, val);
				WRITE_ONCE(tp->copied_seq, val);

	case TCP_REPAIR_OPTIONS:
		else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent)
			err = tcp_repair_options_est(sk, optval, optlen);

		__tcp_sock_set_cork(sk, val);

		err = tcp_sock_set_keepidle_locked(sk, val);

		/* 0: disable, 1: enable, 2: start from ether_header */
		if (val < 0 || val > 2)

	case TCP_WINDOW_CLAMP:
		err = tcp_set_window_clamp(sk, val);

		__tcp_sock_set_quickack(sk, val);

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG_EXT:
		err = tp->af_specific->md5_parse(sk, optname, optval, optlen);

		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
			tcp_fastopen_init_key_once(net);

			fastopen_queue_tune(sk, val);

	case TCP_FASTOPEN_CONNECT:
		if (val > 1 || val < 0) {
		} else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
			   TFO_CLIENT_ENABLE) {
			if (sk->sk_state == TCP_CLOSE)
				tp->fastopen_connect = val;

	case TCP_FASTOPEN_NO_COOKIE:
		if (val > 1 || val < 0)
		else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))

			tp->fastopen_no_cookie = val;

		WRITE_ONCE(tp->tsoffset, val - tcp_time_stamp_raw());

	case TCP_REPAIR_WINDOW:
		err = tcp_repair_set_window(tp, optval, optlen);

	case TCP_NOTSENT_LOWAT:
		WRITE_ONCE(tp->notsent_lowat, val);
		sk->sk_write_space(sk);

		if (val > 1 || val < 0)

			tp->recvmsg_inq = val;

		tcp_enable_tx_delay();
		WRITE_ONCE(tp->tcp_tx_delay, val);

	sockopt_release_sock(sk);

int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen)
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		/* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
		return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname,
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
EXPORT_SYMBOL(tcp_setsockopt);
static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
				      struct tcp_info *info)
	u64 stats[__TCP_CHRONO_MAX], total = 0;

	for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
		stats[i] = tp->chrono_stat[i - 1];
		if (i == tp->chrono_type)
			stats[i] += tcp_jiffies32 - tp->chrono_start;
		stats[i] *= USEC_PER_SEC / HZ;

	info->tcpi_busy_time = total;
	info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
	info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
	const struct inet_connection_sock *icsk = inet_csk(sk);

	memset(info, 0, sizeof(*info));
	if (sk->sk_type != SOCK_STREAM)

	info->tcpi_state = inet_sk_state_load(sk);

	/* Report meaningful fields for all TCP states, including listeners */
	rate = READ_ONCE(sk->sk_pacing_rate);
	rate64 = (rate != ~0UL) ? rate : ~0ULL;
	info->tcpi_pacing_rate = rate64;

	rate = READ_ONCE(sk->sk_max_pacing_rate);
	rate64 = (rate != ~0UL) ? rate : ~0ULL;
	info->tcpi_max_pacing_rate = rate64;

	info->tcpi_reordering = tp->reordering;
	info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);

	if (info->tcpi_state == TCP_LISTEN) {
		/* listeners aliased fields :
		 * tcpi_unacked -> Number of children ready for accept()
		 * tcpi_sacked  -> max backlog
		 */
		info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
		info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);

	slow = lock_sock_fast(sk);

	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;
	if (tp->ecn_flags & TCP_ECN_SEEN)
		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
	if (tp->syn_data_acked)
		info->tcpi_options |= TCPI_OPT_SYN_DATA;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;

	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;

	now = tcp_jiffies32;
	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = tp->srtt_us >> 3;
	info->tcpi_rttvar = tp->mdev_us >> 2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_advmss = tp->advmss;

	info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;

	info->tcpi_bytes_acked = tp->bytes_acked;
	info->tcpi_bytes_received = tp->bytes_received;
	info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
	tcp_get_info_chrono_stats(tp, info);

	info->tcpi_segs_out = tp->segs_out;

	/* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */
	info->tcpi_segs_in = READ_ONCE(tp->segs_in);
	info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in);

	info->tcpi_min_rtt = tcp_min_rtt(tp);
	info->tcpi_data_segs_out = tp->data_segs_out;

	info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
	rate64 = tcp_compute_delivery_rate(tp);
		info->tcpi_delivery_rate = rate64;
	info->tcpi_delivered = tp->delivered;
	info->tcpi_delivered_ce = tp->delivered_ce;
	info->tcpi_bytes_sent = tp->bytes_sent;
	info->tcpi_bytes_retrans = tp->bytes_retrans;
	info->tcpi_dsack_dups = tp->dsack_dups;
	info->tcpi_reord_seen = tp->reord_seen;
	info->tcpi_rcv_ooopack = tp->rcv_ooopack;
	info->tcpi_snd_wnd = tp->snd_wnd;
	info->tcpi_rcv_wnd = tp->rcv_wnd;
	info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
	info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
	unlock_sock_fast(sk, slow);
EXPORT_SYMBOL_GPL(tcp_get_info);
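/* Illustrative sketch, not part of the kernel build: tcp_get_info() backs
 * the TCP_INFO getsockopt, which copies out at most the caller-supplied
 * length so older binaries keep working as struct tcp_info grows:
 *
 *	struct tcp_info ti;
 *	socklen_t ti_len = sizeof(ti);
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &ti_len);
 *	// e.g. ti.tcpi_rtt is the smoothed RTT in microseconds
 */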
static size_t tcp_opt_stats_get_size(void)
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
		nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */
/* Returns TTL or hop limit of an incoming packet from skb. */
static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb)
	if (skb->protocol == htons(ETH_P_IP))
		return ip_hdr(skb)->ttl;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_hdr(skb)->hop_limit;
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
					       const struct sk_buff *orig_skb,
					       const struct sk_buff *ack_skb)
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *stats;
	struct tcp_info info;

	stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);

	tcp_get_info_chrono_stats(tp, &info);
	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
			  info.tcpi_busy_time, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
			  info.tcpi_rwnd_limited, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
			  info.tcpi_sndbuf_limited, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
			  tp->data_segs_out, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
			  tp->total_retrans, TCP_NLA_PAD);

	rate = READ_ONCE(sk->sk_pacing_rate);
	rate64 = (rate != ~0UL) ? rate : ~0ULL;
	nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);

	rate64 = tcp_compute_delivery_rate(tp);
	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);

	nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));

	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
	nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
	nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);

	nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
	nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);

	nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
			  TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
			  TCP_NLA_PAD);
	nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
	nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
	nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
	nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
	nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
		    max_t(int, 0, tp->write_seq - tp->snd_nxt));
	nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns,
			  TCP_NLA_PAD);
	nla_put_u8(stats, TCP_NLA_TTL,
		   tcp_skb_ttl_or_hop_limit(ack_skb));

	nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen)
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);

	if (copy_from_sockptr(&len, optlen, sizeof(int)))

	len = min_t(unsigned int, len, sizeof(int));

		val = tp->mss_cache;
		if (tp->rx_opt.user_mss &&
		    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;

			val = tp->rx_opt.mss_clamp;

		val = !!(tp->nonagle&TCP_NAGLE_OFF);

		val = !!(tp->nonagle&TCP_NAGLE_CORK);

		val = keepalive_time_when(tp) / HZ;

		val = keepalive_intvl_when(tp) / HZ;

		val = keepalive_probes(tp);

		val = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		val = READ_ONCE(tp->linger2);
			val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;

	case TCP_DEFER_ACCEPT:
		val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
		val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,

	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;

		struct tcp_info info;

		if (copy_from_sockptr(&len, optlen, sizeof(int)))

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (copy_to_sockptr(optlen, &len, sizeof(int)))

		if (copy_to_sockptr(optval, &info, len))

		const struct tcp_congestion_ops *ca_ops;
		union tcp_cc_info info;

		if (copy_from_sockptr(&len, optlen, sizeof(int)))

		ca_ops = icsk->icsk_ca_ops;
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ~0U, &attr, &info);

		len = min_t(unsigned int, len, sz);
		if (copy_to_sockptr(optlen, &len, sizeof(int)))

		if (copy_to_sockptr(optval, &info, len))

		val = !inet_csk_in_pingpong_mode(sk);

	case TCP_CONGESTION:
		if (copy_from_sockptr(&len, optlen, sizeof(int)))

		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (copy_to_sockptr(optlen, &len, sizeof(int)))

		if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len))

		if (copy_from_sockptr(&len, optlen, sizeof(int)))

		len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
		if (!icsk->icsk_ulp_ops) {
			if (copy_to_sockptr(optlen, &len, sizeof(int)))

		if (copy_to_sockptr(optlen, &len, sizeof(int)))

		if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len))

	case TCP_FASTOPEN_KEY: {
		u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
		unsigned int key_len;

		if (copy_from_sockptr(&len, optlen, sizeof(int)))

		key_len = tcp_fastopen_get_cipher(net, icsk, key) *
			  TCP_FASTOPEN_KEY_LENGTH;
		len = min_t(unsigned int, len, key_len);
		if (copy_to_sockptr(optlen, &len, sizeof(int)))

		if (copy_to_sockptr(optval, key, len))

	case TCP_THIN_LINEAR_TIMEOUTS:

	case TCP_THIN_DUPACK:

	case TCP_REPAIR_QUEUE:
		val = tp->repair_queue;

	case TCP_REPAIR_WINDOW: {
		struct tcp_repair_window opt;

		if (copy_from_sockptr(&len, optlen, sizeof(int)))

		if (len != sizeof(opt))

		opt.snd_wl1	= tp->snd_wl1;
		opt.snd_wnd	= tp->snd_wnd;
		opt.max_window	= tp->max_window;
		opt.rcv_wnd	= tp->rcv_wnd;
		opt.rcv_wup	= tp->rcv_wup;

		if (copy_to_sockptr(optval, &opt, len))

		if (tp->repair_queue == TCP_SEND_QUEUE)
			val = tp->write_seq;
		else if (tp->repair_queue == TCP_RECV_QUEUE)

	case TCP_USER_TIMEOUT:
		val = READ_ONCE(icsk->icsk_user_timeout);

		val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);

	case TCP_FASTOPEN_CONNECT:
		val = tp->fastopen_connect;

	case TCP_FASTOPEN_NO_COOKIE:
		val = tp->fastopen_no_cookie;

		val = READ_ONCE(tp->tcp_tx_delay);

		val = tcp_time_stamp_raw() + READ_ONCE(tp->tsoffset);

	case TCP_NOTSENT_LOWAT:
		val = READ_ONCE(tp->notsent_lowat);

		val = tp->recvmsg_inq;

	case TCP_SAVED_SYN: {
		if (copy_from_sockptr(&len, optlen, sizeof(int)))

		sockopt_lock_sock(sk);
		if (tp->saved_syn) {
			if (len < tcp_saved_syn_len(tp->saved_syn)) {
				len = tcp_saved_syn_len(tp->saved_syn);
				if (copy_to_sockptr(optlen, &len, sizeof(int))) {
					sockopt_release_sock(sk);

				sockopt_release_sock(sk);

			len = tcp_saved_syn_len(tp->saved_syn);
			if (copy_to_sockptr(optlen, &len, sizeof(int))) {
				sockopt_release_sock(sk);

			if (copy_to_sockptr(optval, tp->saved_syn->data, len)) {
				sockopt_release_sock(sk);

			tcp_saved_syn_free(tp);
			sockopt_release_sock(sk);

			sockopt_release_sock(sk);

			if (copy_to_sockptr(optlen, &len, sizeof(int)))

	case TCP_ZEROCOPY_RECEIVE: {
		struct scm_timestamping_internal tss;
		struct tcp_zerocopy_receive zc = {};

		if (copy_from_sockptr(&len, optlen, sizeof(int)))

		    len < offsetofend(struct tcp_zerocopy_receive, length))

		if (unlikely(len > sizeof(zc))) {
			err = check_zeroed_sockptr(optval, sizeof(zc),
				return err == 0 ? -EINVAL : err;

		if (copy_to_sockptr(optlen, &len, sizeof(int)))

		if (copy_from_sockptr(&zc, optval, len))

		if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS))

		sockopt_lock_sock(sk);
		err = tcp_zerocopy_receive(sk, &zc, &tss);
		err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
		sockopt_release_sock(sk);
		if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
			goto zerocopy_rcv_cmsg;
		case offsetofend(struct tcp_zerocopy_receive, msg_flags):
			goto zerocopy_rcv_cmsg;
		case offsetofend(struct tcp_zerocopy_receive, msg_controllen):
		case offsetofend(struct tcp_zerocopy_receive, msg_control):
		case offsetofend(struct tcp_zerocopy_receive, flags):
		case offsetofend(struct tcp_zerocopy_receive, copybuf_len):
		case offsetofend(struct tcp_zerocopy_receive, copybuf_address):
		case offsetofend(struct tcp_zerocopy_receive, err):
			goto zerocopy_rcv_sk_err;
		case offsetofend(struct tcp_zerocopy_receive, inq):
			goto zerocopy_rcv_inq;
		case offsetofend(struct tcp_zerocopy_receive, length):
			goto zerocopy_rcv_out;

		if (zc.msg_flags & TCP_CMSG_TS)
			tcp_zc_finalize_rx_tstamp(sk, &zc, &tss);

zerocopy_rcv_sk_err:
		zc.err = sock_error(sk);

		zc.inq = tcp_inq_hint(sk);

		if (!err && copy_to_sockptr(optval, &zc, len))

		return -ENOPROTOOPT;

	if (copy_to_sockptr(optlen, &len, sizeof(int)))

	if (copy_to_sockptr(optval, &val, len))
bool tcp_bpf_bypass_getsockopt(int level, int optname)
	/* TCP do_tcp_getsockopt has optimized getsockopt implementation
	 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE.
	 */
	if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		/* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
		return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname,
	return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval),
				 USER_SOCKPTR(optlen));
EXPORT_SYMBOL(tcp_getsockopt);
#ifdef CONFIG_TCP_MD5SIG
static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
static DEFINE_MUTEX(tcp_md5sig_mutex);
static bool tcp_md5sig_pool_populated = false;

static void __tcp_alloc_md5sig_pool(void)
	struct crypto_ahash *hash;

	hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);

	for_each_possible_cpu(cpu) {
		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
		struct ahash_request *req;

			scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
					       sizeof(struct tcphdr),

			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;

		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)

		req = ahash_request_alloc(hash, GFP_KERNEL);

		ahash_request_set_callback(req, 0, NULL, NULL);

		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;

	/* before setting tcp_md5sig_pool_populated, we must commit all writes
	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
	 */
	/* Paired with READ_ONCE() from tcp_alloc_md5sig_pool()
	 * and tcp_get_md5sig_pool().
	 */
	WRITE_ONCE(tcp_md5sig_pool_populated, true);

bool tcp_alloc_md5sig_pool(void)
	/* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
	if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) {
		mutex_lock(&tcp_md5sig_mutex);

		if (!tcp_md5sig_pool_populated)
			__tcp_alloc_md5sig_pool();

		mutex_unlock(&tcp_md5sig_mutex);
	}
	/* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
	return READ_ONCE(tcp_md5sig_pool_populated);
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);

/**
 *	tcp_get_md5sig_pool - get md5sig_pool for this user
 *
 *	We use percpu structure, so if we succeed, we exit with preemption
 *	and BH disabled, to make sure another thread or softirq handling
 *	won't try to get same context.
 */
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
	/* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
	if (READ_ONCE(tcp_md5sig_pool_populated)) {
		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */

		return this_cpu_ptr(&tcp_md5sig_pool);
	}
EXPORT_SYMBOL(tcp_get_md5sig_pool);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  const struct sk_buff *skb, unsigned int header_len)
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct ahash_request *req = hp->md5_req;
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	struct sk_buff *frag_iter;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
	if (crypto_ahash_update(req))

	for (i = 0; i < shi->nr_frags; ++i) {
		const skb_frag_t *f = &shi->frags[i];
		unsigned int offset = skb_frag_off(f);
		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);

		sg_set_page(&sg, page, skb_frag_size(f),
			    offset_in_page(offset));
		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
		if (crypto_ahash_update(req))
	}

	skb_walk_frags(skb, frag_iter)
		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
EXPORT_SYMBOL(tcp_md5_hash_skb_data);

int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
	u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
	struct scatterlist sg;

	sg_init_one(&sg, key->key, keylen);
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);

	/* We use data_race() because tcp_md5_do_add() might change key->key under us */
	return data_race(crypto_ahash_update(hp->md5_req));
EXPORT_SYMBOL(tcp_md5_hash_key);
/* Called with rcu_read_lock() */
enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int dif, int sdif)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct tcphdr *th = tcp_hdr(skb);
	const struct tcp_sock *tp = tcp_sk(sk);
	int genhash, l3index;
	u8 newhash[16];

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	hash_expected = tcp_md5_do_lookup(sk, l3index, saddr, family);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return SKB_NOT_DROPPED_YET;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return SKB_DROP_REASON_TCP_MD5NOTFOUND;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
	}

	/* Check the signature.
	 * To support dual stack listeners, we need to handle
	 * the IPv4-mapped case.
	 */
	if (family == AF_INET)
		genhash = tcp_v4_md5_hash_skb(newhash,
					      hash_expected,
					      NULL, skb);
	else
		genhash = tp->af_specific->calc_md5_hash(newhash,
							 hash_expected,
							 NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		if (family == AF_INET) {
			net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
					saddr, ntohs(th->source),
					daddr, ntohs(th->dest),
					genhash ? " tcp_v4_calc_md5_hash failed"
					: "", l3index);
		} else {
			net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
					genhash ? "failed" : "mismatch",
					saddr, ntohs(th->source),
					daddr, ntohs(th->dest), l3index);
		}
		return SKB_DROP_REASON_TCP_MD5FAILURE;
	}
	return SKB_NOT_DROPPED_YET;
}
EXPORT_SYMBOL(tcp_inbound_md5_hash);
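/* Illustrative sketch only (the wrapper below is hypothetical): roughly how a
 * receive path such as tcp_v4_rcv() consumes the drop reason returned above.
 * SKB_NOT_DROPPED_YET is 0, so any non-zero reason means the segment must be
 * dropped and accounted under that reason.
 */
static bool __maybe_unused tcp_md5_segment_allowed(const struct sock *sk,
						   const struct sk_buff *skb,
						   const struct iphdr *iph,
						   int dif, int sdif,
						   enum skb_drop_reason *reason)
{
	*reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr, &iph->daddr,
				       AF_INET, dif, sdif);
	return *reason == SKB_NOT_DROPPED_YET;
}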
void tcp_done(struct sock *sk)
{
	struct request_sock *req;

	/* We might be called with a new socket, after
	 * inet_csk_prepare_forced_close() has been called
	 * so we can not use lockdep_sock_is_held(sk)
	 */
	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);

	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);
	if (req)
		reqsk_fastopen_remove(sk, req, false);

	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);
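/* tcp_abort() forcibly terminates a socket from outside the normal close
 * path: request and timewait sockets are torn down directly, a listener is
 * stopped, and an established socket gets sk_err set, an error report and,
 * when the state requires it, an active RST before tcp_done().  It is
 * typically reached as the ->diag_destroy handler behind SOCK_DESTROY
 * (e.g. "ss --kill") or from a BPF context, usually with err == ECONNABORTED.
 */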
int tcp_abort(struct sock *sk, int err)
{
	int state = inet_sk_state_load(sk);

	if (state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);

		local_bh_disable();
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		local_bh_enable();
		return 0;
	}
	if (state == TCP_TIME_WAIT) {
		struct inet_timewait_sock *tw = inet_twsk(sk);

		refcount_inc(&tw->tw_refcnt);
		local_bh_disable();
		inet_twsk_deschedule_put(tw);
		local_bh_enable();
		return 0;
	}

	/* BPF context ensures sock locking. */
	if (!has_current_bpf_ctx())
		/* Don't race with userspace socket closes such as tcp_close. */
		lock_sock(sk);

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);
		inet_csk_listen_stop(sk);
	}

	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
	local_bh_disable();
	bh_lock_sock(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		WRITE_ONCE(sk->sk_err, err);
		/* This barrier is coupled with smp_rmb() in tcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		if (tcp_need_reset(sk->sk_state))
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}

	bh_unlock_sock(sk);
	local_bh_enable();
	tcp_write_queue_purge(sk);
	if (!has_current_bpf_ctx())
		release_sock(sk);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_abort);
extern struct tcp_congestion_ops tcp_reno;
static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &thash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("thash_entries=", set_thash_entries);
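/* "thash_entries=<n>" on the kernel command line overrides the automatic,
 * memory-based sizing of the established hash table set up in tcp_init()
 * below; e.g. booting with thash_entries=131072 asks for 128K slots.  The
 * final size is still forced to a power of two, since ehash_mask is used as
 * a bit mask.
 */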
static void __init tcp_init_mem(void)
{
	unsigned long limit = nr_free_buffer_pages() / 16;

	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
}
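/* The thresholds above are fractions of nr_free_buffer_pages() and are
 * counted in pages, matching the net.ipv4.tcp_mem sysctl:
 *   limit             = pages / 16            -> 6.25 %
 *   sysctl_tcp_mem[0] = limit * 3 / 4         -> 4.68 %  (below this: no pressure)
 *   sysctl_tcp_mem[1] = limit                 -> 6.25 %  (memory pressure starts)
 *   sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2 -> 9.37 %  (hard upper limit)
 */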
void __init tcp_init(void)
{
	int max_rshare, max_wshare, cnt;
	unsigned long limit;
	unsigned int i;

	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);

	timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);

	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
			    thash_entries, 21,  /* one slot per 2 MB*/
			    0, 64 * 1024);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);
	tcp_hashinfo.bind2_bucket_cachep =
		kmem_cache_create("tcp_bind2_bucket",
				  sizeof(struct inet_bind2_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					2 * sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
		spin_lock_init(&tcp_hashinfo.bhash2[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
	}

	tcp_hashinfo.pernet = false;

	cnt = tcp_hashinfo.ehash_mask + 1;
	sysctl_tcp_max_orphans = cnt / 2;

	tcp_init_mem();
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);

	init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);

	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_metrics_init();
	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);